]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'for-linus' into for-next
authorAl Viro <viro@zeniv.linux.org.uk>
Sun, 12 Apr 2015 02:29:51 +0000 (22:29 -0400)
committerAl Viro <viro@zeniv.linux.org.uk>
Sun, 12 Apr 2015 02:29:51 +0000 (22:29 -0400)
1523 files changed:
Documentation/ABI/testing/sysfs-class-net
Documentation/ABI/testing/sysfs-class-net-queues
Documentation/devicetree/bindings/net/apm-xgene-enet.txt
Documentation/devicetree/bindings/net/dsa/dsa.txt
Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
Documentation/devicetree/bindings/net/keystone-netcp.txt
Documentation/devicetree/bindings/net/macb.txt
Documentation/filesystems/Locking
Documentation/filesystems/porting
Documentation/filesystems/vfs.txt
Documentation/input/alps.txt
Documentation/input/event-codes.txt
Documentation/input/multi-touch-protocol.txt
Documentation/networking/can.txt
Documentation/networking/filter.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/ipvs-sysctl.txt
Documentation/networking/mpls-sysctl.txt [new file with mode: 0644]
Documentation/networking/packet_mmap.txt
Documentation/networking/pktgen.txt
Documentation/networking/s2io.txt
Documentation/networking/scaling.txt
Documentation/networking/vxge.txt
MAINTAINERS
Makefile
arch/arc/kernel/process.c
arch/arm/boot/dts/at91sam9260.dtsi
arch/arm/boot/dts/at91sam9263.dtsi
arch/arm/boot/dts/at91sam9g45.dtsi
arch/arm/boot/dts/at91sam9x5_macb0.dtsi
arch/arm/boot/dts/at91sam9x5_macb1.dtsi
arch/arm/boot/dts/sama5d3_emac.dtsi
arch/arm64/boot/dts/apm/apm-mustang.dts
arch/arm64/boot/dts/apm/apm-storm.dtsi
arch/c6x/kernel/process.c
arch/frv/kernel/signal.c
arch/hexagon/kernel/process.c
arch/m32r/kernel/signal.c
arch/metag/include/asm/processor.h
arch/microblaze/kernel/signal.c
arch/nios2/kernel/process.c
arch/openrisc/kernel/process.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/asm-compat.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/net/Makefile
arch/powerpc/net/bpf_jit.h
arch/powerpc/net/bpf_jit_asm.S [moved from arch/powerpc/net/bpf_jit_64.S with 76% similarity]
arch/powerpc/net/bpf_jit_comp.c
arch/s390/hypfs/inode.c
arch/s390/include/asm/irq.h
arch/s390/kernel/irq.c
arch/sh/kernel/signal_32.c
arch/sh/kernel/signal_64.c
arch/tile/gxio/mpipe.c
arch/tile/include/gxio/mpipe.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/kgdb.c
arch/x86/kernel/reboot.c
arch/xtensa/kernel/signal.c
block/blk-map.c
block/blk-settings.c
block/scsi_ioctl.c
crypto/af_alg.c
crypto/algif_hash.c
crypto/algif_rng.c
crypto/algif_skcipher.c
drivers/atm/nicstar.c
drivers/bcma/Kconfig
drivers/bcma/Makefile
drivers/bcma/bcma_private.h
drivers/bcma/driver_gpio.c
drivers/bcma/driver_pci.c
drivers/bcma/driver_pci_host.c
drivers/bcma/driver_pcie2.c
drivers/bcma/host_pci.c
drivers/bcma/main.c
drivers/block/loop.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btmrvl_main.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_uart.h
drivers/char/mem.c
drivers/char/raw.c
drivers/char/tile-srom.c
drivers/firewire/net.c
drivers/firmware/dmi_scan.c
drivers/iio/accel/bma180.c
drivers/iio/accel/bmc150-accel.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/adc/Kconfig
drivers/iio/adc/at91_adc.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/adc/vf610_adc.c
drivers/iio/gyro/bmg160.c
drivers/iio/imu/adis_trigger.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
drivers/iio/imu/kmx61.c
drivers/iio/industrialio-core.c
drivers/iio/industrialio-event.c
drivers/iio/proximity/sx9500.c
drivers/infiniband/hw/ipath/ipath_file_ops.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/ah.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/doorbell.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mem.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/srq.c
drivers/infiniband/hw/mlx5/user.h
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/input/mouse/alps.c
drivers/input/mouse/synaptics.c
drivers/isdn/gigaset/ev-layer.c
drivers/isdn/i4l/isdn_net.c
drivers/isdn/mISDN/socket.c
drivers/media/dvb-core/dvb_net.c
drivers/misc/mei/amthif.c
drivers/misc/mei/main.c
drivers/misc/mei/pci-me.c
drivers/net/arcnet/arcnet.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c
drivers/net/caif/caif_serial.c
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/cc770/cc770_platform.c
drivers/net/can/grcan.c
drivers/net/can/led.c
drivers/net/can/m_can/m_can.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/can/xilinx_can.c
drivers/net/dsa/Kconfig
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2.h
drivers/net/dsa/bcm_sf2_regs.h
drivers/net/dsa/mv88e6123_61_65.c
drivers/net/dsa/mv88e6131.c
drivers/net/dsa/mv88e6171.c
drivers/net/dsa/mv88e6352.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/dsa/mv88e6xxx.h
drivers/net/ethernet/adi/bfin_mac.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/amd8111e.h
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
drivers/net/ethernet/apple/bmac.c
drivers/net/ethernet/apple/mace.c
drivers/net/ethernet/apple/macmace.c
drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bgmac.h
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2.h
drivers/net/ethernet/broadcom/bnx2_fw.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/Kconfig
drivers/net/ethernet/brocade/Makefile
drivers/net/ethernet/brocade/bna/Kconfig
drivers/net/ethernet/brocade/bna/Makefile
drivers/net/ethernet/brocade/bna/bfa_cee.c
drivers/net/ethernet/brocade/bna/bfa_cee.h
drivers/net/ethernet/brocade/bna/bfa_cs.h
drivers/net/ethernet/brocade/bna/bfa_defs.h
drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
drivers/net/ethernet/brocade/bna/bfa_defs_status.h
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/brocade/bna/bfa_ioc.h
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
drivers/net/ethernet/brocade/bna/bfa_msgq.c
drivers/net/ethernet/brocade/bna/bfa_msgq.h
drivers/net/ethernet/brocade/bna/bfi.h
drivers/net/ethernet/brocade/bna/bfi_cna.h
drivers/net/ethernet/brocade/bna/bfi_enet.h
drivers/net/ethernet/brocade/bna/bfi_reg.h
drivers/net/ethernet/brocade/bna/bna.h
drivers/net/ethernet/brocade/bna/bna_enet.c
drivers/net/ethernet/brocade/bna/bna_hw_defs.h
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
drivers/net/ethernet/brocade/bna/bna_types.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/bnad.h
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
drivers/net/ethernet/brocade/bna/cna.h
drivers/net/ethernet/brocade/bna/cna_fwimg.c
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/cadence/Makefile
drivers/net/ethernet/cadence/at91_ether.c [deleted file]
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/chelsio/Kconfig
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
drivers/net/ethernet/chelsio/cxgb4/Makefile
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cirrus/cs89x0.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/dec/tulip/dmfe.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/fec_mpc52xx.c
drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
drivers/net/ethernet/freescale/fs_enet/mii-fec.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/mal.c
drivers/net/ethernet/ibm/emac/rgmii.c
drivers/net/ethernet/ibm/emac/tah.c
drivers/net/ethernet/ibm/emac/zmii.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/ptp.c
drivers/net/ethernet/intel/e1000e/regs.h
drivers/net/ethernet/intel/fm10k/fm10k.h
drivers/net/ethernet/intel/fm10k/fm10k_common.c
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
drivers/net/ethernet/intel/fm10k/fm10k_iov.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
drivers/net/ethernet/intel/fm10k/fm10k_type.h
drivers/net/ethernet/intel/fm10k/fm10k_vf.c
drivers/net/ethernet/intel/i40e/Makefile
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_adminq.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb.c
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.h
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_register.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40evf/i40e_adminq.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_prototype.h
drivers/net/ethernet/intel/i40evf/i40e_register.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igbvf/defines.h
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/igbvf/igbvf.h
drivers/net/ethernet/intel/igbvf/mbx.c
drivers/net/ethernet/intel/igbvf/mbx.h
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/igbvf/regs.h
drivers/net/ethernet/intel/igbvf/vf.c
drivers/net/ethernet/intel/igbvf/vf.h
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/intel/ixgbevf/defines.h
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.c
drivers/net/ethernet/intel/ixgbevf/mbx.h
drivers/net/ethernet/intel/ixgbevf/regs.h
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx4/Makefile
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_clock.c
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/fw_qos.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/fw_qos.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/mad.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/pd.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/srq.c
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
drivers/net/ethernet/packetengines/hamachi.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/rocker/rocker.h
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/sfc/siena_sriov.c
drivers/net/ethernet/sfc/vfdi.h
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/netcp.h
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/toshiba/ps3_gelic_net.c
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/ethernet/xscale/ixp4xx_eth.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/baycom_epp.c
drivers/net/hamradio/bpqether.c
drivers/net/hamradio/dmascc.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/hamradio/mkiss.c
drivers/net/hamradio/scc.c
drivers/net/hamradio/yam.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/cc2520.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/netconsole.c
drivers/net/phy/amd-xgbe-phy.c
drivers/net/phy/at803x.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/dp83640.c
drivers/net/phy/fixed_phy.c
drivers/net/phy/mdio-bcm-unimac.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/mdio-mux-gpio.c
drivers/net/phy/mdio-mux-mmioreg.c
drivers/net/phy/mdio-octeon.c
drivers/net/ppp/pppoe.c
drivers/net/ppp/pptp.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/catc.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/cx82310_eth.c
drivers/net/usb/hso.c
drivers/net/usb/lg-vl600.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wireless/airo.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ar5523/ar5523.c
drivers/net/wireless/ath/ar5523/ar5523.h
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/wmi-ops.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi-tlv.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/reset.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/cfg80211.h
drivers/net/wireless/ath/ath6kl/core.c
drivers/net/wireless/ath/ath6kl/main.c
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/ani.c
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_calib.c
drivers/net/wireless/ath/ath9k/ar9003_aic.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/ar9003_aic.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/ar9003_hw.c
drivers/net/wireless/ath/ath9k/ar9003_mci.c
drivers/net/wireless/ath/ath9k/ar9003_mci.h
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ar9003_rtt.c
drivers/net/wireless/ath/ath9k/ar9003_wow.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/btcoex.c
drivers/net/wireless/ath/ath9k/btcoex.h
drivers/net/wireless/ath/ath9k/calib.c
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/dfs.c
drivers/net/wireless/ath/ath9k/eeprom.c
drivers/net/wireless/ath/ath9k/eeprom_4k.c
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/hw-ops.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/mci.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/reg_aic.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/reg_mci.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/reg_wow.h
drivers/net/wireless/ath/ath9k/wmi.c
drivers/net/wireless/ath/ath9k/wmi.h
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/dfs_pattern_detector.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/ethtool.c
drivers/net/wireless/ath/wil6210/fw.c
drivers/net/wireless/ath/wil6210/fw_inc.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/ath/wil6210/wmi.h
drivers/net/wireless/atmel.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/chip.c
drivers/net/wireless/brcm80211/brcmfmac/chip.h
drivers/net/wireless/brcm80211/brcmfmac/core.c
drivers/net/wireless/brcm80211/brcmfmac/flowring.c
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
drivers/net/wireless/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/brcm80211/brcmfmac/p2p.h
drivers/net/wireless/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/brcm80211/brcmfmac/sdio.h
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
drivers/net/wireless/brcm80211/include/chipcommon.h
drivers/net/wireless/cw1200/cw1200_spi.c
drivers/net/wireless/cw1200/sta.c
drivers/net/wireless/cw1200/txrx.c
drivers/net/wireless/hostap/hostap_80211_tx.c
drivers/net/wireless/hostap/hostap_ap.c
drivers/net/wireless/hostap/hostap_info.c
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/hostap/hostap_wlan.h
drivers/net/wireless/ipw2x00/Kconfig
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlegacy/4965-rs.c
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/rs.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-debug.h
drivers/net/wireless/iwlwifi/iwl-devtrace.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-drv.h
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-io.c
drivers/net/wireless/iwlwifi/iwl-modparams.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/iwlwifi/iwl-phy-db.c
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/quota.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sf.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas/cfg.c
drivers/net/wireless/libertas/debugfs.c
drivers/net/wireless/libertas/main.c
drivers/net/wireless/libertas_tf/if_usb.c
drivers/net/wireless/libertas_tf/main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11n.c
drivers/net/wireless/mwifiex/11n.h
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/pcie.h
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwifiex/wmm.h
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/Kconfig
drivers/net/wireless/orinoco/airport.c
drivers/net/wireless/orinoco/wext.c
drivers/net/wireless/p54/fwio.c
drivers/net/wireless/p54/main.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00usb.h
drivers/net/wireless/rtlwifi/base.h
drivers/net/wireless/rtlwifi/cam.h
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/core.h
drivers/net/wireless/rtlwifi/efuse.h
drivers/net/wireless/rtlwifi/rtl8188ee/def.h
drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
drivers/net/wireless/rtlwifi/rtl8188ee/rf.h
drivers/net/wireless/rtlwifi/rtl8192ce/def.h
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192de/def.h
drivers/net/wireless/rtlwifi/rtl8192de/hw.c
drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
drivers/net/wireless/rtlwifi/rtl8192ee/rf.h
drivers/net/wireless/rtlwifi/rtl8192se/def.h
drivers/net/wireless/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/def.h
drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
drivers/net/wireless/rtlwifi/rtl8723be/hw.c
drivers/net/wireless/rtlwifi/rtl8723be/rf.h
drivers/net/wireless/rtlwifi/rtl8821ae/def.h
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
drivers/net/wireless/rtlwifi/rtl8821ae/rf.h
drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
drivers/net/wireless/rtlwifi/stats.c
drivers/net/wireless/rtlwifi/stats.h
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wl18xx/debugfs.c
drivers/net/wireless/ti/wl18xx/event.c
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/ti/wlcore/debugfs.h
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/of/address.c
drivers/of/of_mdio.c
drivers/ptp/ptp_chardev.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_ixp46x.c
drivers/ptp/ptp_pch.c
drivers/s390/net/Kconfig
drivers/s390/net/Makefile
drivers/s390/net/claw.c [deleted file]
drivers/s390/net/claw.h [deleted file]
drivers/s390/net/qeth_core_main.c
drivers/scsi/csiostor/csio_init.c
drivers/scsi/sg.c
drivers/ssb/main.c
drivers/staging/android/ashmem.c
drivers/staging/comedi/drivers/serial2002.c
drivers/staging/iio/Kconfig
drivers/staging/iio/magnetometer/hmc5843_core.c
drivers/staging/lustre/lustre/llite/file.c
drivers/staging/lustre/lustre/llite/llite_internal.h
drivers/staging/lustre/lustre/llite/rw26.c
drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
drivers/staging/unisys/include/timskmod.h
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/samsung.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/isp1760/isp1760-udc.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/keyspan_pda.c
drivers/vhost/net.c
fs/9p/v9fs_vfs.h
fs/9p/vfs_addr.c
fs/9p/vfs_dir.c
fs/9p/vfs_file.c
fs/9p/xattr.c
fs/adfs/file.c
fs/affs/file.c
fs/afs/file.c
fs/afs/misc.c
fs/afs/rxrpc.c
fs/afs/write.c
fs/aio.c
fs/autofs4/autofs_i.h
fs/autofs4/waitq.c
fs/bfs/file.c
fs/bfs/inode.c
fs/block_dev.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/ceph/addr.c
fs/ceph/file.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/coda/file.c
fs/compat_ioctl.c
fs/coredump.c
fs/dax.c
fs/dcache.c
fs/direct-io.c
fs/ecryptfs/file.c
fs/exofs/file.c
fs/exofs/inode.c
fs/ext2/file.c
fs/ext2/inode.c
fs/ext3/file.c
fs/ext3/inode.c
fs/ext4/ext4.h
fs/ext4/file.c
fs/ext4/indirect.c
fs/ext4/inode.c
fs/ext4/page-io.c
fs/f2fs/data.c
fs/f2fs/file.c
fs/fat/file.c
fs/fat/inode.c
fs/file_table.c
fs/fuse/cuse.c
fs/fuse/dev.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/gfs2/aops.c
fs/gfs2/file.c
fs/hfs/inode.c
fs/hfsplus/inode.c
fs/hostfs/hostfs_kern.c
fs/hpfs/file.c
fs/hugetlbfs/inode.c
fs/jffs2/file.c
fs/jfs/file.c
fs/jfs/inode.c
fs/logfs/file.c
fs/minix/file.c
fs/namei.c
fs/ncpfs/file.c
fs/ncpfs/ncplib_kernel.c
fs/ncpfs/ncplib_kernel.h
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/nfs4file.c
fs/nilfs2/file.c
fs/nilfs2/inode.c
fs/ntfs/Makefile
fs/ntfs/file.c
fs/ntfs/inode.c
fs/ocfs2/aops.c
fs/ocfs2/aops.h
fs/ocfs2/file.c
fs/omfs/file.c
fs/open.c
fs/pipe.c
fs/ramfs/file-mmu.c
fs/ramfs/file-nommu.c
fs/read_write.c
fs/reiserfs/file.c
fs/reiserfs/inode.c
fs/romfs/mmap-nommu.c
fs/splice.c
fs/stat.c
fs/sysv/file.c
fs/ubifs/file.c
fs/udf/file.c
fs/udf/inode.c
fs/ufs/file.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_file.c
include/crypto/if_alg.h
include/linux/aio.h
include/linux/bcma/bcma.h
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/bcma/bcma_driver_gmac_cmn.h
include/linux/bcma/bcma_driver_mips.h
include/linux/bcma/bcma_driver_pci.h
include/linux/bcma/bcma_driver_pcie2.h
include/linux/bpf.h
include/linux/brcmphy.h
include/linux/can/dev.h
include/linux/can/led.h
include/linux/can/skb.h
include/linux/dccp.h
include/linux/etherdevice.h
include/linux/filter.h
include/linux/fs.h
include/linux/ieee802154.h
include/linux/if_bridge.h
include/linux/if_pppox.h
include/linux/if_vlan.h
include/linux/inet_diag.h
include/linux/ipv6.h
include/linux/jhash.h
include/linux/lcm.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/mlx5/cmd.h
include/linux/mlx5/cq.h
include/linux/mlx5/device.h
include/linux/mlx5/doorbell.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/qp.h
include/linux/mlx5/srq.h
include/linux/mmc/sdio_ids.h
include/linux/net.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter_arp/arp_tables.h
include/linux/netfilter_bridge.h
include/linux/netfilter_ipv4/ip_tables.h
include/linux/netfilter_ipv6/ip6_tables.h
include/linux/nfs_fs.h
include/linux/of_mdio.h
include/linux/of_net.h
include/linux/phy_fixed.h
include/linux/ptp_clock_kernel.h
include/linux/rhashtable.h
include/linux/security.h
include/linux/skbuff.h
include/linux/sock_diag.h
include/linux/socket.h
include/linux/spi/at86rf230.h
include/linux/spi/cc2520.h
include/linux/tcp.h
include/linux/udp.h
include/linux/uio.h
include/net/9p/client.h
include/net/9p/transport.h
include/net/af_vsock.h
include/net/arp.h
include/net/ax25.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/mgmt.h
include/net/bond_3ad.h
include/net/cfg80211.h
include/net/compat.h
include/net/dcbnl.h
include/net/dn_neigh.h
include/net/dsa.h
include/net/dst_ops.h
include/net/fib_rules.h
include/net/genetlink.h
include/net/if_inet6.h
include/net/inet6_connection_sock.h
include/net/inet6_hashtables.h
include/net/inet_common.h
include/net/inet_connection_sock.h
include/net/inet_hashtables.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/inetpeer.h
include/net/ip.h
include/net/ip6_route.h
include/net/ip6_tunnel.h
include/net/ip_fib.h
include/net/ip_tunnels.h
include/net/ip_vs.h
include/net/ipv6.h
include/net/iw_handler.h
include/net/mac80211.h
include/net/mac802154.h
include/net/ndisc.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netfilter/ipv4/nf_reject.h
include/net/netfilter/ipv6/nf_reject.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_nat_l3proto.h
include/net/netfilter/nf_queue.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nf_tables_ipv4.h
include/net/netfilter/nf_tables_ipv6.h
include/net/netlink.h
include/net/netns/hash.h
include/net/netns/ipv4.h
include/net/netns/ipv6.h
include/net/netns/mpls.h [new file with mode: 0644]
include/net/netns/x_tables.h
include/net/ping.h
include/net/request_sock.h
include/net/sch_generic.h
include/net/sctp/sctp.h
include/net/sock.h
include/net/switchdev.h
include/net/tc_act/tc_bpf.h
include/net/tcp.h
include/net/tcp_states.h
include/net/udp.h
include/net/vxlan.h
include/net/xfrm.h
include/rxrpc/packet.h
include/uapi/linux/bpf.h
include/uapi/linux/can/raw.h
include/uapi/linux/dcbnl.h
include/uapi/linux/filter.h
include/uapi/linux/if_addr.h
include/uapi/linux/if_link.h
include/uapi/linux/if_packet.h
include/uapi/linux/input.h
include/uapi/linux/ip_vs.h
include/uapi/linux/ipv6.h
include/uapi/linux/neighbour.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/nl80211.h
include/uapi/linux/pkt_cls.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/tc_act/tc_bpf.h
include/uapi/linux/tipc_netlink.h
include/uapi/linux/xfrm.h
kernel/acct.c
kernel/bpf/Makefile
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/bpf/hashtab.c
kernel/bpf/helpers.c
kernel/bpf/syscall.c
kernel/bpf/test_stub.c [deleted file]
kernel/bpf/verifier.c
kernel/printk/printk.c
kernel/sysctl.c
lib/iov_iter.c
lib/lcm.c
lib/rhashtable.c
lib/sha1.c
lib/test_rhashtable.c
mm/filemap.c
mm/nommu.c
mm/page_io.c
mm/process_vm_access.c
mm/shmem.c
net/6lowpan/Kconfig
net/6lowpan/Makefile
net/6lowpan/iphc.c
net/6lowpan/nhc.c [new file with mode: 0644]
net/6lowpan/nhc.h [new file with mode: 0644]
net/6lowpan/nhc_dest.c [new file with mode: 0644]
net/6lowpan/nhc_fragment.c [new file with mode: 0644]
net/6lowpan/nhc_hop.c [new file with mode: 0644]
net/6lowpan/nhc_ipv6.c [new file with mode: 0644]
net/6lowpan/nhc_mobility.c [new file with mode: 0644]
net/6lowpan/nhc_routing.c [new file with mode: 0644]
net/6lowpan/nhc_udp.c [new file with mode: 0644]
net/802/fc.c
net/802/fddi.c
net/802/hippi.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/9p/client.c
net/9p/protocol.c
net/9p/trans_common.c
net/9p/trans_common.h
net/9p/trans_fd.c
net/9p/trans_virtio.c
net/Makefile
net/appletalk/aarp.c
net/appletalk/ddp.c
net/atm/common.c
net/atm/common.h
net/atm/lec.c
net/atm/signaling.c
net/ax25/af_ax25.c
net/ax25/ax25_ip.c
net/batman-adv/gateway_client.c
net/batman-adv/hard-interface.c
net/bluetooth/Kconfig
net/bluetooth/Makefile
net/bluetooth/a2mp.c
net/bluetooth/a2mp.h
net/bluetooth/af_bluetooth.c
net/bluetooth/bnep/bnep.h
net/bluetooth/bnep/core.c
net/bluetooth/bnep/netdev.c
net/bluetooth/bnep/sock.c
net/bluetooth/cmtp/capi.c
net/bluetooth/cmtp/core.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_debugfs.c
net/bluetooth/hci_debugfs.h
net/bluetooth/hci_event.c
net/bluetooth/hci_request.c
net/bluetooth/hci_request.h
net/bluetooth/hci_sock.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/mgmt_util.c [new file with mode: 0644]
net/bluetooth/mgmt_util.h [new file with mode: 0644]
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bluetooth/selftest.c
net/bluetooth/smp.c
net/bluetooth/smp.h
net/bridge/br_device.c
net/bridge/br_forward.c
net/bridge/br_input.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_nf_core.c
net/bridge/br_private.h
net/bridge/br_sysfs_if.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/bridge/netfilter/nf_tables_bridge.c
net/bridge/netfilter/nft_reject_bridge.c
net/caif/caif_socket.c
net/can/bcm.c
net/can/raw.c
net/compat.c
net/core/datagram.c
net/core/dev.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/filter.c
net/core/link_watch.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/net_namespace.c
net/core/request_sock.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/core/sysctl_net_core.c
net/dcb/dcbnl.c
net/dccp/dccp.h
net/dccp/diag.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/dccp/probe.c
net/dccp/proto.c
net/dccp/timer.c
net/decnet/af_decnet.c
net/decnet/dn_neigh.c
net/decnet/dn_route.c
net/decnet/dn_rules.c
net/decnet/netfilter/dn_rtmsg.c
net/dsa/Kconfig
net/dsa/dsa.c
net/dsa/dsa_priv.h
net/dsa/slave.c
net/ethernet/eth.c
net/ieee802154/6lowpan/core.c
net/ieee802154/core.c
net/ieee802154/nl-mac.c
net/ieee802154/socket.c
net/ieee802154/sysfs.c
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/cipso_ipv4.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/fib_frontend.c
net/ipv4/fib_lookup.h
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/geneve.c
net/ipv4/gre_offload.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_fragment.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ip_vti.c
net/ipv4/ipcomp.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/arptable_filter.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_nat.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_log_arp.c
net/ipv4/netfilter/nf_log_ipv4.c
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
net/ipv4/netfilter/nf_reject_ipv4.c
net/ipv4/netfilter/nf_tables_arp.c
net/ipv4/netfilter/nf_tables_ipv4.c
net/ipv4/netfilter/nft_chain_nat_ipv4.c
net/ipv4/netfilter/nft_chain_route_ipv4.c
net/ipv4/netfilter/nft_reject_ipv4.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_offload.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/udp_diag.c
net/ipv4/udp_impl.h
net/ipv4/udp_offload.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/addrlabel.c
net/ipv6/af_inet6.c
net/ipv6/ah6.c
net/ipv6/anycast.c
net/ipv6/datagram.c
net/ipv6/esp6.c
net/ipv6/exthdrs_core.c
net/ipv6/fib6_rules.c
net/ipv6/icmp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_nat.c
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/ip6table_security.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/netfilter/nf_log_ipv6.c
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/netfilter/nf_tables_ipv6.c
net/ipv6/netfilter/nft_chain_nat_ipv6.c
net/ipv6/netfilter/nft_chain_route_ipv6.c
net/ipv6/output_core.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/tcpv6_offload.c
net/ipv6/udp.c
net/ipv6/udp_impl.h
net/ipv6/udp_offload.c
net/ipv6/xfrm6_mode_beet.c
net/ipv6/xfrm6_policy.c
net/ipx/af_ipx.c
net/irda/af_irda.c
net/iucv/af_iucv.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/l2tp/l2tp_ppp.c
net/llc/af_llc.c
net/mac80211/aes_ccm.c
net/mac80211/aes_gcm.c
net/mac80211/aes_gmac.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/debugfs.c
net/mac80211/debugfs_netdev.c
net/mac80211/debugfs_sta.c
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/key.h
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/pm.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tdls.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/mac80211/wpa.c
net/mac802154/driver-ops.h
net/mac802154/iface.c
net/mac802154/util.c
net/mpls/Kconfig
net/mpls/Makefile
net/mpls/af_mpls.c [new file with mode: 0644]
net/mpls/internal.h [new file with mode: 0644]
net/netfilter/Kconfig
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_est.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_acct.c
net/netfilter/nf_conntrack_amanda.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_internals.h
net/netfilter/nf_log_common.c
net/netfilter/nf_queue.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/nft_compat.c
net/netfilter/nft_hash.c
net/netfilter/nft_log.c
net/netfilter/nft_lookup.c
net/netfilter/nft_meta.c
net/netfilter/nft_rbtree.c
net/netfilter/nft_reject_inet.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_physdev.c
net/netfilter/xt_set.c
net/netfilter/xt_socket.c
net/netfilter/xt_string.c
net/netlabel/netlabel_mgmt.c
net/netlabel/netlabel_unlabeled.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/netrom/nr_dev.c
net/nfc/llcp_sock.c
net/nfc/rawsock.c
net/openvswitch/Kconfig
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/packet/internal.h
net/phonet/datagram.c
net/phonet/pep.c
net/phonet/socket.c
net/rds/rds.h
net/rds/recv.c
net/rds/send.c
net/rose/af_rose.c
net/rose/rose_dev.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-input.c
net/rxrpc/ar-internal.h
net/rxrpc/ar-local.c
net/rxrpc/ar-output.c
net/rxrpc/ar-recvmsg.c
net/sched/act_bpf.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_bpf.c
net/sched/cls_cgroup.c
net/sched/cls_flow.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_rsvp.h
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/em_text.c
net/sched/sch_api.c
net/sched/sch_fq.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/sysctl.c
net/socket.c
net/sunrpc/svcsock.c
net/switchdev/switchdev.c
net/tipc/Kconfig
net/tipc/Makefile
net/tipc/addr.c
net/tipc/addr.h
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/discover.c
net/tipc/eth_media.c
net/tipc/ib_media.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/name_table.c
net/tipc/node.c
net/tipc/node.h
net/tipc/server.c
net/tipc/socket.c
net/tipc/socket.h
net/tipc/subscr.c
net/tipc/udp_media.c [new file with mode: 0644]
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/wireless/Kconfig
net/wireless/ibss.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/rdev-ops.h
net/wireless/reg.c
net/wireless/reg.h
net/wireless/scan.c
net/wireless/sme.c
net/wireless/trace.h
net/wireless/util.c
net/wireless/wext-compat.c
net/wireless/wext-compat.h
net/wireless/wext-sme.c
net/x25/af_x25.c
net/xfrm/xfrm_state.c
samples/bpf/Makefile
samples/bpf/bpf_helpers.h
samples/bpf/libbpf.h
samples/bpf/sockex1_kern.c
samples/bpf/sockex1_user.c
samples/bpf/sockex2_kern.c
samples/bpf/sockex2_user.c
samples/bpf/tcbpf1_kern.c [new file with mode: 0644]
samples/bpf/test_verifier.c
samples/pktgen/pktgen.conf-1-1 [new file with mode: 0755]
samples/pktgen/pktgen.conf-1-1-flows [new file with mode: 0755]
samples/pktgen/pktgen.conf-1-1-ip6 [new file with mode: 0755]
samples/pktgen/pktgen.conf-1-1-ip6-rdos [new file with mode: 0755]
samples/pktgen/pktgen.conf-1-1-rdos [new file with mode: 0755]
samples/pktgen/pktgen.conf-1-2 [new file with mode: 0755]
samples/pktgen/pktgen.conf-2-1 [new file with mode: 0755]
samples/pktgen/pktgen.conf-2-2 [new file with mode: 0755]
security/apparmor/lsm.c
security/capability.c
security/keys/compat.c
security/keys/internal.h
security/keys/keyctl.c
security/security.c
security/selinux/hooks.c
security/smack/smack_lsm.c
security/smack/smack_netfilter.c
security/tomoyo/common.h
security/tomoyo/file.c
security/tomoyo/realpath.c
security/tomoyo/tomoyo.c
sound/core/pcm_native.c
tools/net/bpf_exp.l
tools/net/bpf_exp.y

index beb8ec4dabbc648dd629e16a076662c9b78ff529..5ecfd72ba684c35acce51273d4370258b147ffb8 100644 (file)
@@ -188,6 +188,14 @@ Description:
                Indicates the interface unique physical port identifier within
                the NIC, as a string.
 
+What:          /sys/class/net/<iface>/phys_port_name
+Date:          March 2015
+KernelVersion: 4.0
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the interface physical port name within the NIC,
+               as a string.
+
 What:          /sys/class/net/<iface>/speed
 Date:          October 2009
 KernelVersion: 2.6.33
index 5e9aeb91d355562f94c93bcbef09c5f73d2cc102..0c0df91b1516fb087ae22f82a43dafa3fb44e858 100644 (file)
@@ -24,6 +24,14 @@ Description:
                Indicates the number of transmit timeout events seen by this
                network interface transmit queue.
 
+What:          /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
+Date:          March 2015
+KernelVersion: 4.1
+Contact:       netdev@vger.kernel.org
+Description:
+               A max rate, in Mbps, set for the queue; a value of zero means the
+               rate limit is disabled, which is the default.
+
 What:          /sys/class/<iface>/queues/tx-<queue>/xps_cpus
 Date:          November 2010
 KernelVersion: 2.6.38
index 6151999c5dcae6e31f60dc155b433723550cf693..f55aa280d34f9fae6e25170f41a06e5c13104f06 100644 (file)
@@ -14,7 +14,11 @@ Required properties for all the ethernet interfaces:
   - "enet_csr": Ethernet control and status register address space
   - "ring_csr": Descriptor ring control and status register address space
   - "ring_cmd": Descriptor ring command register address space
-- interrupts: Ethernet main interrupt
+- interrupts: Two interrupt specifiers can be specified.
+  - First is the Rx interrupt.  This irq is mandatory.
+  - Second is the Tx completion interrupt.
+    This is supported only on SGMII based 1GbE and 10GbE interfaces.
+- port-id: Port number (0 or 1)
 - clocks: Reference to the clock entry.
 - local-mac-address: MAC address assigned to this device
 - phy-connection-type: Interface type between ethernet device and PHY device
@@ -49,6 +53,7 @@ Example:
                      <0x0 0X10000000 0x0 0X200>;
                reg-names = "enet_csr", "ring_csr", "ring_cmd";
                interrupts = <0x0 0x3c 0x4>;
+               port-id = <0>;
                clocks = <&menetclk 0>;
                local-mac-address = [00 01 73 00 00 01];
                phy-connection-type = "rgmii";
index e124847443f87c9465ec82dc03ce792d2d1cf5c4..f0b4cd72411d66bf12aeec3d64335d6641b26674 100644 (file)
@@ -19,7 +19,9 @@ the parent DSA node. The maximum number of allowed child nodes is 4
 (DSA_MAX_SWITCHES).
 Each of these switch child nodes should have the following required properties:
 
-- reg                  : Describes the switch address on the MII bus
+- reg                  : Contains two fields. The first one describes the
+                         address on the MII bus. The second is the switch
+                         number that must be unique in cascaded configurations
 - #address-cells       : Must be 1
 - #size-cells          : Must be 0
 
index d3bbdded4cbe8db90ce49648e95b6476212f27f6..168f1be509126ad2ad77c9e4bdcc4642847226b5 100644 (file)
@@ -6,11 +6,14 @@ Required properties:
   - spi-max-frequency: maximal bus speed, should be set to 7500000 depends
                        sync or async operation mode
   - reg:               the chipselect index
-  - interrupts:                the interrupt generated by the device
+  - interrupts:                the interrupt generated by the device. Using a
+                       non-high-level trigger can cause deadlocks while handling the ISR.
 
 Optional properties:
   - reset-gpio:                GPIO spec for the rstn pin
   - sleep-gpio:                GPIO spec for the slp_tr pin
+  - xtal-trim:         u8 value for fine tuning the internal capacitance
+                       arrays of xtal pins: 0 = +0 pF, 0xf = +4.5 pF
 
 Example:
 
@@ -18,6 +21,7 @@ Example:
                compatible = "atmel,at86rf231";
                spi-max-frequency = <7500000>;
                reg = <0>;
-               interrupts = <19 1>;
+               interrupts = <19 4>;
                interrupt-parent = <&gpio3>;
+               xtal-trim = /bits/ 8 <0x06>;
        };
index 0071883c08d8787f1d9a49d1369a6220527b869f..fb6d49f184edc5b4ef65524ff673fec4ca3a2ba6 100644 (file)
@@ -13,11 +13,15 @@ Required properties:
        - cca-gpio:             GPIO spec for the CCA pin
        - vreg-gpio:            GPIO spec for the VREG pin
        - reset-gpio:           GPIO spec for the RESET pin
+Optional properties:
+       - amplified:            include if the CC2520 is connected to a CC2591 amplifier
+
 Example:
        cc2520@0 {
                compatible = "ti,cc2520";
                reg = <0>;
                spi-max-frequency = <4000000>;
+               amplified;
                pinctrl-names = "default";
                pinctrl-0 = <&cc2520_cape_pins>;
                fifo-gpio = <&gpio1 18 0>;
index f9c07710478d9cfe4de4d451bd52774ad06a04c1..d0e6fa38f335fcfa10f0840221bc663e4a5c9f31 100644 (file)
@@ -49,6 +49,7 @@ Required properties:
 - compatible:  Should be "ti,netcp-1.0"
 - clocks:      phandle to the reference clocks for the subsystem.
 - dma-id:      Navigator packet dma instance id.
+- ranges:      address range of NetCP (includes, Ethernet SS, PA and SA)
 
 Optional properties:
 - reg:         register location and the size for the following register
@@ -64,10 +65,30 @@ NetCP device properties: Device specification for NetCP sub-modules.
 1Gb/10Gb (gbe/xgbe) ethernet switch sub-module specifications.
 Required properties:
 - label:       Must be "netcp-gbe" for 1Gb & "netcp-xgbe" for 10Gb.
+- compatible:  Must be one of below:-
+               "ti,netcp-gbe" for 1GbE on NetCP 1.4
+               "ti,netcp-gbe-5" for 1GbE N NetCP 1.5 (N=5)
+               "ti,netcp-gbe-9" for 1GbE N NetCP 1.5 (N=9)
+               "ti,netcp-gbe-2" for 1GbE N NetCP 1.5 (N=2)
+               "ti,netcp-xgbe" for 10 GbE
+
 - reg:         register location and the size for the following register
                regions in the specified order.
-               - subsystem registers
-               - serdes registers
+               - switch subsystem registers
+               - sgmii port3/4 module registers (only for NetCP 1.4)
+               - switch module registers
+               - serdes registers (only for 10G)
+
+               NetCP 1.4 ethss, here is the order
+                       index #0 - switch subsystem registers
+                       index #1 - sgmii port3/4 module registers
+                       index #2 - switch module registers
+
+               NetCP 1.5 ethss 9 port, 5 port and 2 port
+                       index #0 - switch subsystem registers
+                       index #1 - switch module registers
+                       index #2 - serdes registers
+
 - tx-channel:  the navigator packet dma channel name for tx.
 - tx-queue:    the navigator queue number associated with the tx dma channel.
 - interfaces:  specification for each of the switch port to be registered as a
@@ -120,14 +141,13 @@ Optional properties:
 
 Example binding:
 
-netcp: netcp@2090000 {
+netcp: netcp@2000000 {
        reg = <0x2620110 0x8>;
        reg-names = "efuse";
        compatible = "ti,netcp-1.0";
        #address-cells = <1>;
        #size-cells = <1>;
-       ranges;
-
+       ranges  = <0 0x2000000 0xfffff>;
        clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>;
        dma-coherent;
        /* big-endian; */
@@ -137,9 +157,9 @@ netcp: netcp@2090000 {
                #address-cells = <1>;
                #size-cells = <1>;
                ranges;
-               gbe@0x2090000 {
+               gbe@90000 {
                        label = "netcp-gbe";
-                       reg = <0x2090000 0xf00>;
+                       reg = <0x90000 0x300>, <0x90400 0x400>, <0x90800 0x700>;
                        /* enable-ale; */
                        tx-queue = <648>;
                        tx-channel = <8>;
index aaa696414f57a1012b5fc18586ce06da6a899309..ba19d671e8081148529368442fa39be451294608 100644 (file)
@@ -2,10 +2,13 @@
 
 Required properties:
 - compatible: Should be "cdns,[<chip>-]{macb|gem}"
-  Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs.
+  Use "cdns,at91sam9260-macb" for Atmel at91sam9 SoCs or the 10/100Mbit IP
+  available on sama5d3 SoCs.
   Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
   Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
   the Cadence GEM, or the generic form: "cdns,gem".
+  Use "cdns,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
+  Use "cdns,sama5d4-gem" for the Gigabit IP available on Atmel sama5d4 SoCs.
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
 - phy-mode: See ethernet.txt file in the same directory.
index f91926f2f4824dee2c6785dcaf4cedd84df838e9..7cdbca44e34331be6a419c461b3848fb73d8e207 100644 (file)
@@ -196,7 +196,7 @@ prototypes:
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
-       int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+       int (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
        int (*migratepage)(struct address_space *, struct page *, struct page *);
        int (*launder_page)(struct page *);
        int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
@@ -429,8 +429,6 @@ prototypes:
        loff_t (*llseek) (struct file *, loff_t, int);
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
-       ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
-       ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
index fa2db081505e6d6d581ea4ee6d8f1eaab56a3482..e69274de8d0c9c1754cc40b8d99d78a724e63e50 100644 (file)
@@ -471,3 +471,15 @@ in your dentry operations instead.
 [mandatory]
        f_dentry is gone; use f_path.dentry, or, better yet, see if you can avoid
        it entirely.
+--
+[mandatory]
+       never call ->read() and ->write() directly; use __vfs_{read,write} or
+       wrappers; instead of checking for ->write or ->read being NULL, look for
+       FMODE_CAN_{WRITE,READ} in file->f_mode.
+--
+[mandatory]
+       do _not_ use new_sync_{read,write} for ->read/->write; leave it NULL
+       instead.
+--
+[mandatory]
+       ->aio_read/->aio_write are gone.  Use ->read_iter/->write_iter.
index 966b22829f3b605b92f19ce43665225ab17a8a7e..5d833b32bbcd1046de40a15fee169ed462d274fc 100644 (file)
@@ -590,7 +590,7 @@ struct address_space_operations {
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
-       ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+       ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
        /* migrate the contents of a page to the specified target */
        int (*migratepage) (struct page *, struct page *);
        int (*launder_page) (struct page *);
@@ -804,8 +804,6 @@ struct file_operations {
        loff_t (*llseek) (struct file *, loff_t, int);
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
-       ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
-       ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
@@ -838,14 +836,10 @@ otherwise noted.
 
   read: called by read(2) and related system calls
 
-  aio_read: vectored, possibly asynchronous read
-
   read_iter: possibly asynchronous read with iov_iter as destination
 
   write: called by write(2) and related system calls
 
-  aio_write: vectored, possibly asynchronous write
-
   write_iter: possibly asynchronous write with iov_iter as source
 
   iterate: called when the VFS needs to read the directory contents
index a63e5e013a8cddee63b1d3520dd1c2c73e80dc31..92ae734c00c348ab810373e0dc838a92462c932f 100644 (file)
@@ -114,6 +114,9 @@ ALPS Absolute Mode - Protocol Version 2
  byte 4:  0   y6   y5   y4   y3   y2   y1   y0
  byte 5:  0   z6   z5   z4   z3   z2   z1   z0
 
+Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
+the DualPoint Stick.
+
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
 
@@ -127,6 +130,11 @@ Dualpoint device -- interleaved packet format
  byte 7:    0   y6   y5   y4   y3   y2   y1   y0
  byte 8:    0   z6   z5   z4   z3   z2   z1   z0
 
+Devices which use the interleaving format normally send standard PS/2 mouse
+packets for the DualPoint Stick + ALPS Absolute Mode packets for the
+touchpad, switching to the interleaved packet format when both the stick and
+the touchpad are used at the same time.
+
 ALPS Absolute Mode - Protocol Version 3
 ---------------------------------------
 
index c587a966413e8597da3241f851db92f6e6df1d81..96705616f5820a6d48d6cfda497c7fcf30917e2a 100644 (file)
@@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior.
 The kernel does not provide button emulation for such devices but treats
 them as any other INPUT_PROP_BUTTONPAD device.
 
+INPUT_PROP_ACCELEROMETER
+-------------------------
+Directional axes on this device (absolute and/or relative x, y, z) represent
+accelerometer data. All other axes retain their meaning. A device must not mix
+regular directional axes and accelerometer axes on the same event node.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
index 7b4f59c09ee2301d077446f5f9ce9c2b96aa2551..b85d000faeb4067c9ab1ed06690105d459a40a4e 100644 (file)
@@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE
 
 The type of approaching tool. A lot of kernel drivers cannot distinguish
 between different tool types, such as a finger or a pen. In such cases, the
-event should be omitted. The protocol currently supports MT_TOOL_FINGER and
-MT_TOOL_PEN [2]. For type B devices, this event is handled by input core;
-drivers should instead use input_mt_report_slot_state().
+event should be omitted. The protocol currently supports MT_TOOL_FINGER,
+MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled
+by input core; drivers should instead use input_mt_report_slot_state().
+A contact's ABS_MT_TOOL_TYPE may change over time while still touching the
+device, because the firmware may not be able to determine which tool is being
+used when it first appears.
 
 ABS_MT_BLOB_ID
 
index 0a2859a8ee7ec66e6f2da253ca345b9d0f6d4ed1..5abad1e921ca810c1e765d1d84d1ca4a50ce5016 100644 (file)
@@ -22,7 +22,8 @@ This file contains
       4.1.3 RAW socket option CAN_RAW_LOOPBACK
       4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS
       4.1.5 RAW socket option CAN_RAW_FD_FRAMES
-      4.1.6 RAW socket returned message flags
+      4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
+      4.1.7 RAW socket returned message flags
     4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
       4.2.1 Broadcast Manager operations
       4.2.2 Broadcast Manager message flags
@@ -601,7 +602,22 @@ solution for a couple of reasons:
   CAN FD frames by checking if the device maximum transfer unit is CANFD_MTU.
   The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
 
-  4.1.6 RAW socket returned message flags
+  4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
+
+  The CAN_RAW socket can set multiple CAN identifier specific filters that
+  lead to multiple filters in the af_can.c filter processing. These filters
+  are independent from each other which leads to logical OR'ed filters when
+  applied (see 4.1.1).
+
+  This socket option joins the given CAN filters in the way that only CAN
+  frames are passed to user space that matched *all* given CAN filters. The
+  semantic for the applied filters is therefore changed to a logical AND.
+
+  This is useful especially when the filterset is a combination of filters
+  where the CAN_INV_FILTER flag is set in order to notch single CAN IDs or
+  CAN ID ranges from the incoming traffic.
+
+  4.1.7 RAW socket returned message flags
 
   When using recvmsg() call, the msg->msg_flags may contain following flags:
 
index 9930ecfbb4658f7bd93969b52440e49eeb753de2..135581f015e1eb4298c063ebd938ccf2ec2cb461 100644 (file)
@@ -280,7 +280,8 @@ Possible BPF extensions are shown in the following table:
   rxhash                                skb->hash
   cpu                                   raw_smp_processor_id()
   vlan_tci                              skb_vlan_tag_get(skb)
-  vlan_pr                               skb_vlan_tag_present(skb)
+  vlan_avail                            skb_vlan_tag_present(skb)
+  vlan_tpid                             skb->vlan_proto
   rand                                  prandom_u32()
 
 These extensions can also be prefixed with '#'.
index 1b8c964b0d175c7d86d86896469650e5c59fa689..071fb18dc57c868e99367f9d3872bc0e91401414 100644 (file)
@@ -388,6 +388,16 @@ tcp_mtu_probing - INTEGER
          1 - Disabled by default, enabled when an ICMP black hole detected
          2 - Always enabled, use initial MSS of tcp_base_mss.
 
+tcp_probe_interval - INTEGER
+       Controls how often to start TCP Packetization-Layer Path MTU
+       Discovery reprobe. The default is reprobing every 10 minutes as
+       per RFC4821.
+
+tcp_probe_threshold - INTEGER
+       Controls when TCP Packetization-Layer Path MTU Discovery probing
+       will stop in respect to the width of search range in bytes. Default
+       is 8 bytes.
+
 tcp_no_metrics_save - BOOLEAN
        By default, TCP saves various connection metrics in the route cache
        when the connection closes, so that connections established in the
@@ -1116,11 +1126,23 @@ arp_accept - BOOLEAN
        gratuitous arp frame, the arp table will be updated regardless
        if this setting is on or off.
 
+mcast_solicit - INTEGER
+       The maximum number of multicast probes in INCOMPLETE state,
+       when the associated hardware address is unknown.  Defaults
+       to 3.
+
+ucast_solicit - INTEGER
+       The maximum number of unicast probes in PROBE state, when
+       the hardware address is being reconfirmed.  Defaults to 3.
 
 app_solicit - INTEGER
        The maximum number of probes to send to the user space ARP daemon
        via netlink before dropping back to multicast probes (see
-       mcast_solicit).  Defaults to 0.
+       mcast_resolicit).  Defaults to 0.
+
+mcast_resolicit - INTEGER
+       The maximum number of multicast probes after unicast and
+       app probes in PROBE state.  Defaults to 0.
 
 disable_policy - BOOLEAN
        Disable IPSEC policy (SPD) for this interface
@@ -1198,6 +1220,17 @@ anycast_src_echo_reply - BOOLEAN
        FALSE: disabled
        Default: FALSE
 
+idgen_delay - INTEGER
+       Controls the delay in seconds after which time to retry
+       privacy stable address generation if a DAD conflict is
+       detected.
+       Default: 1 (as specified in RFC7217)
+
+idgen_retries - INTEGER
+       Controls the number of retries to generate a stable privacy
+       address if a DAD conflict is detected.
+       Default: 3 (as specified in RFC7217)
+
 mld_qrv - INTEGER
        Controls the MLD query robustness variable (see RFC3810 9.1).
        Default: 2 (as specified by RFC3810 9.1)
@@ -1518,6 +1551,20 @@ use_optimistic - BOOLEAN
                0: disabled (default)
                1: enabled
 
+stable_secret - IPv6 address
+       This IPv6 address will be used as a secret to generate IPv6
+       addresses for link-local addresses and autoconfigured
+       ones. All addresses generated after setting this secret will
+       be stable privacy ones by default. This can be changed via the
+       addrgenmode ip-link. conf/default/stable_secret is used as the
+       secret for the namespace, the interface specific ones can
+       overwrite that. Writes to conf/all/stable_secret are refused.
+
+       It is recommended to generate this secret during installation
+       of a system and keep it stable after that.
+
+       By default the stable secret is unset.
+
 icmp/*:
 ratelimit - INTEGER
        Limit the maximal rates for sending ICMPv6 packets.
index 7a3c047295914cbc8c4273506a9b6d35246a1750..3ba709531adba970595251fa73d6d471ed14c5c1 100644 (file)
@@ -22,6 +22,27 @@ backup_only - BOOLEAN
        If set, disable the director function while the server is
        in backup mode to avoid packet loops for DR/TUN methods.
 
+conn_reuse_mode - INTEGER
+       1 - default
+
+       Controls how ipvs will deal with connections on which port
+       reuse is detected. It is a bitmap, with the values being:
+
+       0: disable any special handling on port reuse. The new
+       connection will be delivered to the same real server that was
+       servicing the previous connection. This will effectively
+       disable expire_nodest_conn.
+
+       bit 1: enable rescheduling of new connections when it is safe.
+       That is, whenever expire_nodest_conn and for TCP sockets, when
+       the connection is in TIME_WAIT state (which is only possible if
+       you use NAT mode).
+
+       bit 2: it is bit 1 plus, for TCP connections, when connections
+       are in FIN_WAIT state, as this is the last state seen by load
+       balancer in Direct Routing mode. This bit helps on adding new
+       real servers to a very busy cluster.
+
 conntrack - BOOLEAN
        0 - disabled (default)
        not 0 - enabled
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
new file mode 100644 (file)
index 0000000..639ddf0
--- /dev/null
@@ -0,0 +1,20 @@
+/proc/sys/net/mpls/* Variables:
+
+platform_labels - INTEGER
+       Number of entries in the platform label table.  It is not
+       possible to configure forwarding for label values equal to or
+       greater than the number of platform labels.
+
+       A dense utilization of the entries in the platform label table
+       is possible and expected as the platform labels are locally
+       allocated.
+
+       If the number of platform label table entries is set to 0 no
+       label will be recognized by the kernel and mpls forwarding
+       will be disabled.
+
+       Reducing this value will remove all label routing entries that
+       no longer fit in the table.
+
+       Possible values: 0 - 1048575
+       Default: 0
index a6d7cb91069e207b24bae3a2f4e2925475639640..daa015af16a092a8d4b7fd1df73f2bbe282251d9 100644 (file)
@@ -440,9 +440,10 @@ and the following flags apply:
 +++ Capture process:
      from include/linux/if_packet.h
 
-     #define TP_STATUS_COPY          2 
-     #define TP_STATUS_LOSING        4 
-     #define TP_STATUS_CSUMNOTREADY  8 
+     #define TP_STATUS_COPY          (1 << 1)
+     #define TP_STATUS_LOSING        (1 << 2)
+     #define TP_STATUS_CSUMNOTREADY  (1 << 3)
+     #define TP_STATUS_CSUM_VALID    (1 << 7)
 
 TP_STATUS_COPY        : This flag indicates that the frame (and associated
                         meta information) has been truncated because it's 
@@ -466,6 +467,12 @@ TP_STATUS_CSUMNOTREADY: currently it's used for outgoing IP packets which
                         reading the packet we should not try to check the 
                         checksum. 
 
+TP_STATUS_CSUM_VALID  : This flag indicates that at least the transport
+                        header checksum of the packet has been already
+                        validated on the kernel side. If the flag is not set
+                        then we are free to check the checksum by ourselves
+                        provided that TP_STATUS_CSUMNOTREADY is also not set.
+
 for convenience there are also the following defines:
 
      #define TP_STATUS_KERNEL        0
index 6915c6b2786972b8afde6b91c1695df0cf993c08..0344f1d45b3765b016f0388a830e3a8e48af31ce 100644 (file)
@@ -3,13 +3,11 @@
                   HOWTO for the linux packet generator 
                   ------------------------------------
 
-Date: 041221
-
-Enable CONFIG_NET_PKTGEN to compile and build pktgen.o either in kernel
-or as module. Module is preferred. insmod pktgen if needed. Once running
-pktgen creates a thread on each CPU where each thread has affinity to its CPU.
-Monitoring and controlling is done via /proc. Easiest to select a suitable 
-a sample script and configure.
+Enable CONFIG_NET_PKTGEN to compile and build pktgen either in-kernel
+or as a module.  A module is preferred; modprobe pktgen if needed.  Once
+running, pktgen creates a thread for each CPU with affinity to that CPU.
+Monitoring and controlling is done via /proc.  It is easiest to select a
+suitable sample script and configure that.
 
 On a dual CPU:
 
@@ -27,7 +25,7 @@ For monitoring and control pktgen creates:
 Tuning NIC for max performance
 ==============================
 
-The default NIC setting are (likely) not tuned for pktgen's artificial
+The default NIC settings are (likely) not tuned for pktgen's artificial
 overload type of benchmarking, as this could hurt the normal use-case.
 
 Specifically increasing the TX ring buffer in the NIC:
@@ -35,20 +33,20 @@ Specifically increasing the TX ring buffer in the NIC:
 
 A larger TX ring can improve pktgen's performance, while it can hurt
 in the general case, 1) because the TX ring buffer might get larger
-than the CPUs L1/L2 cache, 2) because it allow more queueing in the
+than the CPU's L1/L2 cache, 2) because it allows more queueing in the
 NIC HW layer (which is bad for bufferbloat).
 
-One should be careful to conclude, that packets/descriptors in the HW
+One should hesitate to conclude that packets/descriptors in the HW
 TX ring cause delay.  Drivers usually delay cleaning up the
-ring-buffers (for various performance reasons), thus packets stalling
-the TX ring, might just be waiting for cleanup.
+ring-buffers for various performance reasons, and packets stalling
+the TX ring might just be waiting for cleanup.
 
-This cleanup issues is specifically the case, for the driver ixgbe
-(Intel 82599 chip).  This driver (ixgbe) combine TX+RX ring cleanups,
+This cleanup issue is specifically the case for the driver ixgbe
+(Intel 82599 chip).  This driver (ixgbe) combines TX+RX ring cleanups,
 and the cleanup interval is affected by the ethtool --coalesce setting
 of parameter "rx-usecs".
 
-For ixgbe use e.g "30" resulting in approx 33K interrupts/sec (1/30*10^6):
+For ixgbe use e.g. "30" resulting in approx 33K interrupts/sec (1/30*10^6):
  # ethtool -C ethX rx-usecs 30
 
 
@@ -60,15 +58,16 @@ Running:
 Stopped: eth1 
 Result: OK: max_before_softirq=10000
 
-Most important the devices assigned to thread. Note! A device can only belong 
-to one thread.
+Most important are the devices assigned to the thread.  Note that a
+device can only belong to one thread.
 
 
 Viewing devices
 ===============
 
-Parm section holds configured info. Current hold running stats. 
-Result is printed after run or after interruption. Example:
+The Params section holds configured information.  The Current section
+holds running statistics.  The Result is printed after a run or after
+interruption.  Example:
 
 /proc/net/pktgen/eth1       
 
@@ -93,7 +92,8 @@ Result: OK: 13101142(c12220741+d880401) usec, 10000000 (60byte,0frags)
 
 Configuring threads and devices
 ================================
-This is done via the /proc interface easiest done via pgset in the scripts
+This is done via the /proc interface, and most easily done via pgset
+as defined in the sample scripts.
 
 Examples:
 
@@ -192,10 +192,11 @@ Examples:
  pgset "rate 300M"        set rate to 300 Mb/s
  pgset "ratep 1000000"    set rate to 1Mpps
 
-Example scripts
-===============
+Sample scripts
+==============
 
-A collection of small tutorial scripts for pktgen is in examples dir.
+A collection of small tutorial scripts for pktgen is in the
+samples/pktgen directory:
 
 pktgen.conf-1-1                  # 1 CPU 1 dev 
 pktgen.conf-1-2                  # 1 CPU 2 dev
@@ -206,25 +207,26 @@ pktgen.conf-1-1-ip6              # 1 CPU 1 dev ipv6
 pktgen.conf-1-1-ip6-rdos         # 1 CPU 1 dev ipv6  w. route DoS
 pktgen.conf-1-1-flows            # 1 CPU 1 dev multiple flows.
 
-Run in shell: ./pktgen.conf-X-Y It does all the setup including sending. 
+Run in shell: ./pktgen.conf-X-Y
+This does all the setup including sending.
 
 
 Interrupt affinity
 ===================
-Note when adding devices to a specific CPU there good idea to also assign 
-/proc/irq/XX/smp_affinity so the TX-interrupts gets bound to the same CPU.
-as this reduces cache bouncing when freeing skb's.
+Note that when adding devices to a specific CPU it is a good idea to
+also assign /proc/irq/XX/smp_affinity so that the TX interrupts are bound
+to the same CPU.  This reduces cache bouncing when freeing skbs.
 
 Enable IPsec
 ============
-Default IPsec transformation with ESP encapsulation plus Transport mode
-could be enabled by simply setting:
+Default IPsec transformation with ESP encapsulation plus transport mode
+can be enabled by simply setting:
 
 pgset "flag IPSEC"
 pgset "flows 1"
 
 To avoid breaking existing testbed scripts for using AH type and tunnel mode,
-user could use "pgset spi SPI_VALUE" to specify which formal of transformation
+you can use "pgset spi SPI_VALUE" to specify which transformation mode
 to employ.
 
 
index d2a9f43b5546684fec415e957617869e418ed140..0362a42f7cf4478b8592d02f20d3128acd560ead 100644 (file)
@@ -38,7 +38,7 @@ The corresponding adapter's LED will blink multiple times.
 
 3.     Features supported:
 a. Jumbo frames. Xframe I/II supports MTU up to 9600 bytes,
-modifiable using ifconfig command.
+modifiable using ip command.
 
 b. Offloads. Supports checksum offload(TCP/UDP/IP) on transmit
 and receive, TSO.
index 99ca40e8e810888d30bbb9726eb2de2e537c3e79..cbfac0949635c1d109930b051e6c4d96dc4c74d1 100644 (file)
@@ -421,6 +421,15 @@ best CPUs to share a given queue are probably those that share the cache
 with the CPU that processes transmit completions for that queue
 (transmit interrupts).
 
+Per TX Queue rate limitation:
+=============================
+
+These are rate-limitation mechanisms implemented by HW, where currently
+a max-rate attribute is supported, by setting a Mbps value to
+
+/sys/class/net/<dev>/queues/tx-<n>/tx_maxrate
+
+A value of zero means disabled, and this is the default.
 
 Further Information
 ===================
index bb76c667a476557df0416a43f2ad622247fbd65c..abfec245f97c6aa9cc6e4bc40dcf8c2324f496ec 100644 (file)
@@ -39,7 +39,7 @@ iii) PCI-SIG's I/O Virtualization
 
 iv)  Jumbo frames
        X3100 Series supports MTU up to 9600 bytes, modifiable using
-       ifconfig command.
+       ip command.
 
 v)   Offloads supported: (Enabled by default)
        Checksum offload (TCP/UDP/IP) on transmit and receive paths
index ac976acd9841031c816ed5885bc19df0d3793677..9091b4ad1cc3c74c380da5e1eff26c14f8e165d1 100644 (file)
@@ -637,8 +637,7 @@ F:      drivers/gpu/drm/radeon/radeon_kfd.h
 F:      include/uapi/linux/kfd_ioctl.h
 
 AMD MICROCODE UPDATE SUPPORT
-M:     Andreas Herrmann <herrmann.der.user@googlemail.com>
-L:     amd64-microcode@amd64.org
+M:     Borislav Petkov <bp@alien8.de>
 S:     Maintained
 F:     arch/x86/kernel/cpu/microcode/amd*
 
@@ -5095,7 +5094,7 @@ S:        Supported
 F:     drivers/platform/x86/intel_menlow.c
 
 INTEL IA32 MICROCODE UPDATE SUPPORT
-M:     Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+M:     Borislav Petkov <bp@alien8.de>
 S:     Maintained
 F:     arch/x86/kernel/cpu/microcode/core*
 F:     arch/x86/kernel/cpu/microcode/intel*
@@ -6323,6 +6322,7 @@ F:        drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
 M:     Amir Vadai <amirv@mellanox.com>
+M:     Ido Shamay <idos@mellanox.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -8352,7 +8352,6 @@ F:        block/partitions/ibm.c
 
 S390 NETWORK DRIVERS
 M:     Ursula Braun <ursula.braun@de.ibm.com>
-M:     Frank Blaschka <blaschka@linux.vnet.ibm.com>
 M:     linux390@de.ibm.com
 L:     linux-s390@vger.kernel.org
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -9827,7 +9826,7 @@ F:        include/linux/wl12xx.h
 
 TIPC NETWORK LAYER
 M:     Jon Maloy <jon.maloy@ericsson.com>
-M:     Allan Stephens <allan.stephens@windriver.com>
+M:     Ying Xue <ying.xue@windriver.com>
 L:     netdev@vger.kernel.org (core kernel code)
 L:     tipc-discussion@lists.sourceforge.net (user apps, general discussion)
 W:     http://tipc.sourceforge.net/
index da36a3be7969049870d969ae4a623fd7a0c15ee1..54430f933b628ca99bdbc1e2bf5dd2570ca0354c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
index 98c00a2d4dd9a57f1c503ac2ebb6d63a3f1a76b4..f46efd14059d302712df70442604c19f1a8f2fe6 100644 (file)
@@ -155,8 +155,6 @@ int copy_thread(unsigned long clone_flags,
  */
 void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
 {
-       set_fs(USER_DS); /* user space */
-
        regs->sp = usp;
        regs->ret = pc;
 
index e7f0a4ae271c6a53244c3a3b0f6204cae45d6b0b..62d25b14deb8a6916ddbeb6116b25ba71a7e31ba 100644 (file)
                        };
 
                        macb0: ethernet@fffc4000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xfffc4000 0x100>;
                                interrupts = <21 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index fce301c4e9d6e22aeb37d3a2a4fe83bf6634ad78..e4f61a979a5700a067c9795d370dc841450d713b 100644 (file)
                        };
 
                        macb0: ethernet@fffbc000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xfffbc000 0x100>;
                                interrupts = <21 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index 488af63d5174c7e9dc0eb29319ccb35eebd8fc7c..8ec05b11298a536b07c2c48586d6badcdcfaf9cd 100644 (file)
                        };
 
                        macb0: ethernet@fffbc000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xfffbc000 0x100>;
                                interrupts = <25 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index 57e89d1d03253fd64af8f4c01f807caf9611dfaf..73d7e30965badd14366540fe2cb9ce6780d67f2e 100644 (file)
@@ -53,7 +53,7 @@
                        };
 
                        macb0: ethernet@f802c000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xf802c000 0x100>;
                                interrupts = <24 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index 663676c02861ff2e35118f5526d36847edb904e0..d81980c40c7d05a7525fcb1b9f1b09892325dc26 100644 (file)
@@ -41,7 +41,7 @@
                        };
 
                        macb1: ethernet@f8030000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xf8030000 0x100>;
                                interrupts = <27 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index fe2af92763129beb8b4b493adbfb847684b4a264..b4544cf11bad1b08e5825dcf6690bb57acc558db 100644 (file)
@@ -41,7 +41,7 @@
                        };
 
                        macb1: ethernet@f802c000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xf802c000 0x100>;
                                interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index 2e25de0800b9f061bebb4ee81c4c2ffaa1cf0dc9..83578e766b945ae2c35b41b93813a8c29ec15c1b 100644 (file)
        status = "ok";
 };
 
+&sgenet1 {
+       status = "ok";
+};
+
 &xgenet {
        status = "ok";
 };
index a857794432d6756ac628d55f362512ed375bcabd..e74f6e0a208ccbf584d2b2d61c98d3d0f1d169e2 100644 (file)
                                clock-output-names = "sge0clk";
                        };
 
+                       sge1clk: sge1clk@1f21c000 {
+                               compatible = "apm,xgene-device-clock";
+                               #clock-cells = <1>;
+                               clocks = <&socplldiv2 0>;
+                               reg = <0x0 0x1f21c000 0x0 0x1000>;
+                               reg-names = "csr-reg";
+                               csr-mask = <0xc>;
+                               clock-output-names = "sge1clk";
+                       };
+
                        xge0clk: xge0clk@1f61c000 {
                                compatible = "apm,xgene-device-clock";
                                #clock-cells = <1>;
                              <0x0 0x1f200000 0x0 0Xc300>,
                              <0x0 0x1B000000 0x0 0X200>;
                        reg-names = "enet_csr", "ring_csr", "ring_cmd";
-                       interrupts = <0x0 0xA0 0x4>;
+                       interrupts = <0x0 0xA0 0x4>,
+                                    <0x0 0xA1 0x4>;
                        dma-coherent;
                        clocks = <&sge0clk 0>;
                        local-mac-address = [00 00 00 00 00 00];
                        phy-connection-type = "sgmii";
                };
 
+               sgenet1: ethernet@1f210030 {
+                       compatible = "apm,xgene1-sgenet";
+                       status = "disabled";
+                       reg = <0x0 0x1f210030 0x0 0xd100>,
+                             <0x0 0x1f200000 0x0 0Xc300>,
+                             <0x0 0x1B000000 0x0 0X8000>;
+                       reg-names = "enet_csr", "ring_csr", "ring_cmd";
+                       interrupts = <0x0 0xAC 0x4>,
+                                    <0x0 0xAD 0x4>;
+                       port-id = <1>;
+                       dma-coherent;
+                       clocks = <&sge1clk 0>;
+                       local-mac-address = [00 00 00 00 00 00];
+                       phy-connection-type = "sgmii";
+               };
+
                xgenet: ethernet@1f610000 {
                        compatible = "apm,xgene1-xgenet";
                        status = "disabled";
                              <0x0 0x1f600000 0x0 0Xc300>,
                              <0x0 0x18000000 0x0 0X200>;
                        reg-names = "enet_csr", "ring_csr", "ring_cmd";
-                       interrupts = <0x0 0x60 0x4>;
+                       interrupts = <0x0 0x60 0x4>,
+                                    <0x0 0x61 0x4>;
                        dma-coherent;
                        clocks = <&xge0clk 0>;
                        /* mac address will be overwritten by the bootloader */
index 57d2ea8d19773828d620c98b8593f069ff32cd23..3ae9f5a166a0584034dea8fb41ea645ccf88aeea 100644 (file)
@@ -101,7 +101,6 @@ void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp)
         */
        usp -= 8;
 
-       set_fs(USER_DS);
        regs->pc  = pc;
        regs->sp  = usp;
        regs->tsr |= 0x40; /* set user mode */
index 336713ab47454fa2afd8e603d53255f5d91acaa0..85ca6727ca075c8ce47ca73f801ed612cdeeb86d 100644 (file)
@@ -176,8 +176,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set)
        struct sigframe __user *frame;
        int rsig, sig = ksig->sig;
 
-       set_fs(USER_DS);
-
        frame = get_sigframe(ksig, sizeof(*frame));
 
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -257,8 +255,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set)
        struct rt_sigframe __user *frame;
        int rsig, sig = ksig->sig;
 
-       set_fs(USER_DS);
-
        frame = get_sigframe(ksig, sizeof(*frame));
 
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
index 0a0dd5c05b46af8fda112b2ab9cc606a08d6d5a5..a9ebd471823a6644a6773ed99d780c5c620f3e56 100644 (file)
@@ -37,8 +37,6 @@
  */
 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
 {
-       /* Set to run with user-mode data segmentation */
-       set_fs(USER_DS);
        /* We want to zero all data-containing registers. Is this overkill? */
        memset(regs, 0, sizeof(*regs));
        /* We might want to also zero all Processor registers here */
index 7736c6660a1580562bbbed37ece6aae5a61ac99a..8c25e0c8f6a5c752ba9c201de8292e4dd0c5120b 100644 (file)
@@ -214,8 +214,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        regs->r2 = (unsigned long)&frame->uc;
        regs->bpc = (unsigned long)ksig->ka.sa.sa_handler;
 
-       set_fs(USER_DS);
-
 #if DEBUG_SIG
        printk("SIG deliver (%s:%d): sp=%p pc=%p\n",
                current->comm, current->pid, frame, regs->pc);
index 13272fd5a5baec8e3b1a4de778a6982abf0adae7..0838ca69976466bbfc3c3854fecf91566afd6a9b 100644 (file)
@@ -111,7 +111,6 @@ struct thread_struct {
  */
 #define start_thread(regs, pc, usp) do {                                  \
        unsigned int *argc = (unsigned int *) bprm->exec;                  \
-       set_fs(USER_DS);                                                   \
        current->thread.int_depth = 1;                                     \
        /* Force this process down to user land */                         \
        regs->ctx.SaveMask = TBICTX_PRIV_BIT;                              \
index a1cbaf90e2ea47215e8bfce77d7d8d113f9fd110..20ccd4e2baa54c88f4fbcdd13c1a407ee66ffc82 100644 (file)
@@ -236,8 +236,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        /* Offset to handle microblaze rtid r14, 0 */
        regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
 
-       set_fs(USER_DS);
-
 #ifdef DEBUG_SIG
        pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
                current->comm, current->pid, frame, regs->pc);
index 0e075b5ad2a54298c99ea668848b523c12449b15..2f8c74f93e705a08e28f2c7a9e6ba9da754ff187 100644 (file)
@@ -94,7 +94,6 @@ void show_regs(struct pt_regs *regs)
 
 void flush_thread(void)
 {
-       set_fs(USER_DS);
 }
 
 int copy_thread(unsigned long clone_flags,
index 386af258591dbe7084867f88b79c3eef980ce15b..7095dfe7666ba3dd55a0807ffd7d09b00af3ccc2 100644 (file)
@@ -197,7 +197,6 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
 {
        unsigned long sr = mfspr(SPR_SR) & ~SPR_SR_SM;
 
-       set_fs(USER_DS);
        memset(regs, 0, sizeof(struct pt_regs));
 
        regs->pc = pc;
index 22b0940494bb5016be1fca2a98aa7b75c105487a..5084bdcc604676fe663432c036fdd3213c9a646a 100644 (file)
@@ -126,7 +126,7 @@ config PPC
        select IRQ_FORCED_THREADING
        select HAVE_RCU_TABLE_FREE if SMP
        select HAVE_SYSCALL_TRACEPOINTS
-       select HAVE_BPF_JIT if PPC64
+       select HAVE_BPF_JIT
        select HAVE_ARCH_JUMP_LABEL
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_HAS_GCOV_PROFILE_ALL
index 21be8ae8f809747302debb8d2b8f9ae974e69613..dc85dcb891cfa5a60adc894c692f5aed2ec39549 100644 (file)
@@ -23,6 +23,8 @@
 #define PPC_STL                stringify_in_c(std)
 #define PPC_STLU       stringify_in_c(stdu)
 #define PPC_LCMPI      stringify_in_c(cmpdi)
+#define PPC_LCMPLI     stringify_in_c(cmpldi)
+#define PPC_LCMP       stringify_in_c(cmpd)
 #define PPC_LONG       stringify_in_c(.llong)
 #define PPC_LONG_ALIGN stringify_in_c(.balign 8)
 #define PPC_TLNEI      stringify_in_c(tdnei)
@@ -52,6 +54,8 @@
 #define PPC_STL                stringify_in_c(stw)
 #define PPC_STLU       stringify_in_c(stwu)
 #define PPC_LCMPI      stringify_in_c(cmpwi)
+#define PPC_LCMPLI     stringify_in_c(cmplwi)
+#define PPC_LCMP       stringify_in_c(cmpw)
 #define PPC_LONG       stringify_in_c(.long)
 #define PPC_LONG_ALIGN stringify_in_c(.balign 4)
 #define PPC_TLNEI      stringify_in_c(twnei)
index 4cbe23af400ab622749b31f980d5d72d6b8587ac..5c93f691b4955ce91a287aabf98ed45a7aafe870 100644 (file)
 #define PPC_INST_LWZ                   0x80000000
 #define PPC_INST_STD                   0xf8000000
 #define PPC_INST_STDU                  0xf8000001
+#define PPC_INST_STW                   0x90000000
+#define PPC_INST_STWU                  0x94000000
 #define PPC_INST_MFLR                  0x7c0802a6
 #define PPC_INST_MTLR                  0x7c0803a6
 #define PPC_INST_CMPWI                 0x2c000000
index 266b3950c3ac519099e11c698ed5b4cc5cd15519..1306a58ac5413b742e9214cdc4983d34f5fec4fc 100644 (file)
@@ -1,4 +1,4 @@
 #
 # Arch-specific network modules
 #
-obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
index c406aa95b2bc5970b1aa7d3033cc8518faa93089..889fd199a821cbda22fe6e61ebed54cfa115d53f 100644 (file)
 #ifndef _BPF_JIT_H
 #define _BPF_JIT_H
 
+#ifdef CONFIG_PPC64
+#define BPF_PPC_STACK_R3_OFF   48
 #define BPF_PPC_STACK_LOCALS   32
 #define BPF_PPC_STACK_BASIC    (48+64)
 #define BPF_PPC_STACK_SAVE     (18*8)
 #define BPF_PPC_STACKFRAME     (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
                                 BPF_PPC_STACK_SAVE)
 #define BPF_PPC_SLOWPATH_FRAME (48+64)
+#else
+#define BPF_PPC_STACK_R3_OFF   24
+#define BPF_PPC_STACK_LOCALS   16
+#define BPF_PPC_STACK_BASIC    (24+32)
+#define BPF_PPC_STACK_SAVE     (18*4)
+#define BPF_PPC_STACKFRAME     (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
+                                BPF_PPC_STACK_SAVE)
+#define BPF_PPC_SLOWPATH_FRAME (24+32)
+#endif
+
+#define REG_SZ         (BITS_PER_LONG/8)
 
 /*
  * Generated code register usage:
@@ -57,7 +70,11 @@ DECLARE_LOAD_FUNC(sk_load_half);
 DECLARE_LOAD_FUNC(sk_load_byte);
 DECLARE_LOAD_FUNC(sk_load_byte_msh);
 
+#ifdef CONFIG_PPC64
 #define FUNCTION_DESCR_SIZE    24
+#else
+#define FUNCTION_DESCR_SIZE    0
+#endif
 
 /*
  * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
@@ -86,7 +103,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 #define PPC_LIS(r, i)          PPC_ADDIS(r, 0, i)
 #define PPC_STD(r, base, i)    EMIT(PPC_INST_STD | ___PPC_RS(r) |            \
                                     ___PPC_RA(base) | ((i) & 0xfffc))
-
+#define PPC_STDU(r, base, i)   EMIT(PPC_INST_STDU | ___PPC_RS(r) |           \
+                                    ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STW(r, base, i)    EMIT(PPC_INST_STW | ___PPC_RS(r) |            \
+                                    ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STWU(r, base, i)   EMIT(PPC_INST_STWU | ___PPC_RS(r) |           \
+                                    ___PPC_RA(base) | ((i) & 0xfffc))
 
 #define PPC_LBZ(r, base, i)    EMIT(PPC_INST_LBZ | ___PPC_RT(r) |            \
                                     ___PPC_RA(base) | IMM_L(i))
@@ -98,6 +120,17 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
                                     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LHBRX(r, base, b)  EMIT(PPC_INST_LHBRX | ___PPC_RT(r) |          \
                                     ___PPC_RA(base) | ___PPC_RB(b))
+
+#ifdef CONFIG_PPC64
+#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
+#else
+#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
+#endif
+
 /* Convenience helpers for the above with 'far' offsets: */
 #define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i);   \
                else {  PPC_ADDIS(r, base, IMM_HA(i));                        \
@@ -115,6 +148,29 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
                else {  PPC_ADDIS(r, base, IMM_HA(i));                        \
                        PPC_LHZ(r, r, IMM_L(i)); } } while(0)
 
+#ifdef CONFIG_PPC64
+#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
+#else
+#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
+#endif
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_PPC64
+#define PPC_BPF_LOAD_CPU(r)            \
+       do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2);   \
+               PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index));          \
+       } while (0)
+#else
+#define PPC_BPF_LOAD_CPU(r)     \
+       do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);                  \
+               PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)),                                                       \
+                               offsetof(struct thread_info, cpu));                                                     \
+       } while(0)
+#endif
+#else
+#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0)
+#endif
+
 #define PPC_CMPWI(a, i)                EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPDI(a, i)                EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPLWI(a, i)       EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i))
@@ -196,6 +252,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
                                PPC_ORI(d, d, (uintptr_t)(i) & 0xffff);       \
                } } while (0);
 
+#ifdef CONFIG_PPC64
+#define PPC_FUNC_ADDR(d,i) do { PPC_LI64(d, i); } while(0)
+#else
+#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
+#endif
+
 #define PPC_LHBRX_OFFS(r, base, i) \
                do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
 #ifdef __LITTLE_ENDIAN__
similarity index 76%
rename from arch/powerpc/net/bpf_jit_64.S
rename to arch/powerpc/net/bpf_jit_asm.S
index 8f87d92171228d470f9274ff4eee27e921dbfbed..8ff5a3b5d1c3d4aab4bab875b5f9165b3b7f0678 100644 (file)
  */
        .globl  sk_load_word
 sk_load_word:
-       cmpdi   r_addr, 0
+       PPC_LCMPI       r_addr, 0
        blt     bpf_slow_path_word_neg
        .globl  sk_load_word_positive_offset
 sk_load_word_positive_offset:
        /* Are we accessing past headlen? */
        subi    r_scratch1, r_HL, 4
-       cmpd    r_scratch1, r_addr
+       PPC_LCMP        r_scratch1, r_addr
        blt     bpf_slow_path_word
        /* Nope, just hitting the header.  cr0 here is eq or gt! */
 #ifdef __LITTLE_ENDIAN__
@@ -52,12 +52,12 @@ sk_load_word_positive_offset:
 
        .globl  sk_load_half
 sk_load_half:
-       cmpdi   r_addr, 0
+       PPC_LCMPI       r_addr, 0
        blt     bpf_slow_path_half_neg
        .globl  sk_load_half_positive_offset
 sk_load_half_positive_offset:
        subi    r_scratch1, r_HL, 2
-       cmpd    r_scratch1, r_addr
+       PPC_LCMP        r_scratch1, r_addr
        blt     bpf_slow_path_half
 #ifdef __LITTLE_ENDIAN__
        lhbrx   r_A, r_D, r_addr
@@ -68,11 +68,11 @@ sk_load_half_positive_offset:
 
        .globl  sk_load_byte
 sk_load_byte:
-       cmpdi   r_addr, 0
+       PPC_LCMPI       r_addr, 0
        blt     bpf_slow_path_byte_neg
        .globl  sk_load_byte_positive_offset
 sk_load_byte_positive_offset:
-       cmpd    r_HL, r_addr
+       PPC_LCMP        r_HL, r_addr
        ble     bpf_slow_path_byte
        lbzx    r_A, r_D, r_addr
        blr
@@ -83,11 +83,11 @@ sk_load_byte_positive_offset:
  */
        .globl sk_load_byte_msh
 sk_load_byte_msh:
-       cmpdi   r_addr, 0
+       PPC_LCMPI       r_addr, 0
        blt     bpf_slow_path_byte_msh_neg
        .globl sk_load_byte_msh_positive_offset
 sk_load_byte_msh_positive_offset:
-       cmpd    r_HL, r_addr
+       PPC_LCMP        r_HL, r_addr
        ble     bpf_slow_path_byte_msh
        lbzx    r_X, r_D, r_addr
        rlwinm  r_X, r_X, 2, 32-4-2, 31-2
@@ -101,13 +101,13 @@ sk_load_byte_msh_positive_offset:
  */
 #define bpf_slow_path_common(SIZE)                             \
        mflr    r0;                                             \
-       std     r0, 16(r1);                                     \
+       PPC_STL r0, PPC_LR_STKOFF(r1);                                  \
        /* R3 goes in parameter space of caller's frame */      \
-       std     r_skb, (BPF_PPC_STACKFRAME+48)(r1);             \
-       std     r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);           \
-       std     r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);           \
-       addi    r5, r1, BPF_PPC_STACK_BASIC+(2*8);              \
-       stdu    r1, -BPF_PPC_SLOWPATH_FRAME(r1);                \
+       PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);           \
+       PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);              \
+       PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);              \
+       addi    r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ);         \
+       PPC_STLU        r1, -BPF_PPC_SLOWPATH_FRAME(r1);                \
        /* R3 = r_skb, as passed */                             \
        mr      r4, r_addr;                                     \
        li      r6, SIZE;                                       \
@@ -115,19 +115,19 @@ sk_load_byte_msh_positive_offset:
        nop;                                                    \
        /* R3 = 0 on success */                                 \
        addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
-       ld      r0, 16(r1);                                     \
-       ld      r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);           \
-       ld      r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);           \
+       PPC_LL  r0, PPC_LR_STKOFF(r1);                                  \
+       PPC_LL  r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);              \
+       PPC_LL  r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);              \
        mtlr    r0;                                             \
-       cmpdi   r3, 0;                                          \
+       PPC_LCMPI       r3, 0;                                          \
        blt     bpf_error;      /* cr0 = LT */                  \
-       ld      r_skb, (BPF_PPC_STACKFRAME+48)(r1);             \
+       PPC_LL  r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);           \
        /* Great success! */
 
 bpf_slow_path_word:
        bpf_slow_path_common(4)
        /* Data value is on stack, and cr0 != LT */
-       lwz     r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
+       lwz     r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
        blr
 
 bpf_slow_path_half:
@@ -154,12 +154,12 @@ bpf_slow_path_byte_msh:
  */
 #define sk_negative_common(SIZE)                               \
        mflr    r0;                                             \
-       std     r0, 16(r1);                                     \
+       PPC_STL r0, PPC_LR_STKOFF(r1);                                  \
        /* R3 goes in parameter space of caller's frame */      \
-       std     r_skb, (BPF_PPC_STACKFRAME+48)(r1);             \
-       std     r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);           \
-       std     r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);           \
-       stdu    r1, -BPF_PPC_SLOWPATH_FRAME(r1);                \
+       PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);           \
+       PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);              \
+       PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);              \
+       PPC_STLU        r1, -BPF_PPC_SLOWPATH_FRAME(r1);                \
        /* R3 = r_skb, as passed */                             \
        mr      r4, r_addr;                                     \
        li      r5, SIZE;                                       \
@@ -167,19 +167,19 @@ bpf_slow_path_byte_msh:
        nop;                                                    \
        /* R3 != 0 on success */                                \
        addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
-       ld      r0, 16(r1);                                     \
-       ld      r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);           \
-       ld      r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);           \
+       PPC_LL  r0, PPC_LR_STKOFF(r1);                                  \
+       PPC_LL  r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);              \
+       PPC_LL  r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);              \
        mtlr    r0;                                             \
-       cmpldi  r3, 0;                                          \
+       PPC_LCMPLI      r3, 0;                                          \
        beq     bpf_error_slow; /* cr0 = EQ */                  \
        mr      r_addr, r3;                                     \
-       ld      r_skb, (BPF_PPC_STACKFRAME+48)(r1);             \
+       PPC_LL  r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);           \
        /* Great success! */
 
 bpf_slow_path_word_neg:
        lis     r_scratch1,-32  /* SKF_LL_OFF */
-       cmpd    r_addr, r_scratch1      /* addr < SKF_* */
+       PPC_LCMP        r_addr, r_scratch1      /* addr < SKF_* */
        blt     bpf_error       /* cr0 = LT */
        .globl  sk_load_word_negative_offset
 sk_load_word_negative_offset:
@@ -189,7 +189,7 @@ sk_load_word_negative_offset:
 
 bpf_slow_path_half_neg:
        lis     r_scratch1,-32  /* SKF_LL_OFF */
-       cmpd    r_addr, r_scratch1      /* addr < SKF_* */
+       PPC_LCMP        r_addr, r_scratch1      /* addr < SKF_* */
        blt     bpf_error       /* cr0 = LT */
        .globl  sk_load_half_negative_offset
 sk_load_half_negative_offset:
@@ -199,7 +199,7 @@ sk_load_half_negative_offset:
 
 bpf_slow_path_byte_neg:
        lis     r_scratch1,-32  /* SKF_LL_OFF */
-       cmpd    r_addr, r_scratch1      /* addr < SKF_* */
+       PPC_LCMP        r_addr, r_scratch1      /* addr < SKF_* */
        blt     bpf_error       /* cr0 = LT */
        .globl  sk_load_byte_negative_offset
 sk_load_byte_negative_offset:
@@ -209,7 +209,7 @@ sk_load_byte_negative_offset:
 
 bpf_slow_path_byte_msh_neg:
        lis     r_scratch1,-32  /* SKF_LL_OFF */
-       cmpd    r_addr, r_scratch1      /* addr < SKF_* */
+       PPC_LCMP        r_addr, r_scratch1      /* addr < SKF_* */
        blt     bpf_error       /* cr0 = LT */
        .globl  sk_load_byte_msh_negative_offset
 sk_load_byte_msh_negative_offset:
@@ -221,7 +221,7 @@ sk_load_byte_msh_negative_offset:
 bpf_error_slow:
        /* fabricate a cr0 = lt */
        li      r_scratch1, -1
-       cmpdi   r_scratch1, 0
+       PPC_LCMPI       r_scratch1, 0
 bpf_error:
        /* Entered with cr0 = lt */
        li      r3, 0
index d1916b577f2c9a71c3fb3a5ee419925f070412d0..17cea18a09d32f103aa453c645a7324739c58ffe 100644 (file)
@@ -1,8 +1,9 @@
-/* bpf_jit_comp.c: BPF JIT compiler for PPC64
+/* bpf_jit_comp.c: BPF JIT compiler
  *
  * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
  *
  * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
+ * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -36,11 +37,11 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
                if (ctx->seen & SEEN_DATAREF) {
                        /* If we call any helpers (for loads), save LR */
                        EMIT(PPC_INST_MFLR | __PPC_RT(R0));
-                       PPC_STD(0, 1, 16);
+                       PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
 
                        /* Back up non-volatile regs. */
-                       PPC_STD(r_D, 1, -(8*(32-r_D)));
-                       PPC_STD(r_HL, 1, -(8*(32-r_HL)));
+                       PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
+                       PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /*
@@ -49,11 +50,10 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
                         */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
-                                       PPC_STD(i, 1, -(8*(32-i)));
+                                       PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
                        }
                }
-               EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
-                    (-BPF_PPC_STACKFRAME & 0xfffc));
+               PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
        }
 
        if (ctx->seen & SEEN_DATAREF) {
@@ -67,7 +67,7 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
                                                         data_len));
                PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
                PPC_SUB(r_HL, r_HL, r_scratch1);
-               PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
+               PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
        }
 
        if (ctx->seen & SEEN_XREG) {
@@ -99,16 +99,16 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
        if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
                PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
                if (ctx->seen & SEEN_DATAREF) {
-                       PPC_LD(0, 1, 16);
+                       PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
                        PPC_MTLR(0);
-                       PPC_LD(r_D, 1, -(8*(32-r_D)));
-                       PPC_LD(r_HL, 1, -(8*(32-r_HL)));
+                       PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
+                       PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /* Restore any saved non-vol registers */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
-                                       PPC_LD(i, 1, -(8*(32-i)));
+                                       PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
                        }
                }
        }
@@ -355,7 +355,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                                                ifindex) != 4);
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                type) != 2);
-                       PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
+                       PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                                dev));
                        PPC_CMPDI(r_scratch1, 0);
                        if (ctx->pc_ret0 != -1) {
@@ -411,20 +411,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                        PPC_SRWI(r_A, r_A, 5);
                        break;
                case BPF_ANC | SKF_AD_CPU:
-#ifdef CONFIG_SMP
-                       /*
-                        * PACA ptr is r13:
-                        * raw_smp_processor_id() = local_paca->paca_index
-                        */
-                       BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
-                                                 paca_index) != 2);
-                       PPC_LHZ_OFFS(r_A, 13,
-                                    offsetof(struct paca_struct, paca_index));
-#else
-                       PPC_LI(r_A, 0);
-#endif
+                       PPC_BPF_LOAD_CPU(r_A);
                        break;
-
                        /*** Absolute loads from packet header/data ***/
                case BPF_LD | BPF_W | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_word);
@@ -437,7 +425,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                common_load:
                        /* Load from [K]. */
                        ctx->seen |= SEEN_DATAREF;
-                       PPC_LI64(r_scratch1, func);
+                       PPC_FUNC_ADDR(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_LI32(r_addr, K);
                        PPC_BLRL();
@@ -463,7 +451,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                         * in the helper functions.
                         */
                        ctx->seen |= SEEN_DATAREF | SEEN_XREG;
-                       PPC_LI64(r_scratch1, func);
+                       PPC_FUNC_ADDR(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_ADDI(r_addr, r_X, IMM_L(K));
                        if (K >= 32768)
@@ -685,9 +673,11 @@ void bpf_jit_compile(struct bpf_prog *fp)
 
        if (image) {
                bpf_flush_icache(code_base, code_base + (proglen/4));
+#ifdef CONFIG_PPC64
                /* Function descriptor nastiness: Address + TOC */
                ((u64 *)image)[0] = (u64)code_base;
                ((u64 *)image)[1] = local_paca->kernel_toc;
+#endif
                fp->bpf_func = (void *)image;
                fp->jited = true;
        }
index 99824ff8dd354e74ff421a2c9bb59243e045d541..3f5c799b7fb53aecc3af6cf8b877705cb0e8bd0e 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/mount.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 #include <asm/ebcdic.h>
 #include "hypfs.h"
 
@@ -437,8 +437,6 @@ struct dentry *hypfs_create_str(struct dentry *dir,
 static const struct file_operations hypfs_file_ops = {
        .open           = hypfs_open,
        .release        = hypfs_release,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = hypfs_read_iter,
        .write_iter     = hypfs_write_iter,
        .llseek         = no_llseek,
index 343ea7c987aa196911df1dd0967e5aee099ea5d5..ff95d15a2384760bab3be77bab26eda0ab848a09 100644 (file)
@@ -57,7 +57,6 @@ enum interruption_class {
        IRQIO_TAP,
        IRQIO_VMR,
        IRQIO_LCS,
-       IRQIO_CLW,
        IRQIO_CTC,
        IRQIO_APB,
        IRQIO_ADM,
index f238720690f3c2d8c70feb80f0cce76e514bcd97..0220e7d3c629018b238b47f2eb77baf94a12d872 100644 (file)
@@ -79,7 +79,6 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
        {.irq = IRQIO_TAP,  .name = "TAP", .desc = "[I/O] Tape"},
        {.irq = IRQIO_VMR,  .name = "VMR", .desc = "[I/O] Unit Record Devices"},
        {.irq = IRQIO_LCS,  .name = "LCS", .desc = "[I/O] LCS"},
-       {.irq = IRQIO_CLW,  .name = "CLW", .desc = "[I/O] CLAW"},
        {.irq = IRQIO_CTC,  .name = "CTC", .desc = "[I/O] CTC"},
        {.irq = IRQIO_APB,  .name = "APB", .desc = "[I/O] AP Bus"},
        {.irq = IRQIO_ADM,  .name = "ADM", .desc = "[I/O] EADM Subchannel"},
index 0b34f2a704fe1d2fdcf03b49fe37d73a7725ecf7..97292890b51bc4d36121cfa35fde08c79e3260f5 100644 (file)
@@ -329,8 +329,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        if (err)
                return -EFAULT;
 
-       set_fs(USER_DS);
-
        pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
                 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
 
@@ -408,8 +406,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        if (err)
                return -EFAULT;
 
-       set_fs(USER_DS);
-
        pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
                 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
 
index 71993c6a7d94b0f6da895f78d96176de5aa31e14..0462995d4d7f66ed9531948aaf0d3ef9bdd63b63 100644 (file)
@@ -457,8 +457,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
 
        regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
 
-       set_fs(USER_DS);
-
        /* Broken %016Lx */
        pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
                 signal, current->comm, current->pid, frame,
@@ -547,8 +545,6 @@ static int setup_rt_frame(struct ksignal *kig, sigset_t *set,
        regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
        regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
 
-       set_fs(USER_DS);
-
        pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
                 signal, current->comm, current->pid, frame,
                 regs->pc >> 32, regs->pc & 0xffffffff,
index 6f00e98506366f9e597377205443b414f92f6922..ee186e13dfe6fde92c9127aa07dccc474d1253d2 100644 (file)
@@ -456,7 +456,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
 EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
 
 int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
-                            const struct timespec *ts)
+                            const struct timespec64 *ts)
 {
        cycles_t cycles = get_cycles();
        return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
@@ -466,7 +466,7 @@ int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
 EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp);
 
 int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
-                            struct timespec *ts)
+                            struct timespec64 *ts)
 {
        int ret;
        cycles_t cycles_prev, cycles_now, clock_rate;
index e37cf4f0cffd42a14f3d392d9701ef8a4da746f5..73e83a187866602806cd771daba6fceaaf76bc9a 100644 (file)
@@ -1830,7 +1830,7 @@ extern int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
  *  code.
  */
 extern int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
-                                   struct timespec *ts);
+                                   struct timespec64 *ts);
 
 /* Set the timestamp of mPIPE.
  *
@@ -1840,7 +1840,7 @@ extern int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
  *  code.
  */
 extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
-                                   const struct timespec *ts);
+                                   const struct timespec64 *ts);
 
 /* Adjust the timestamp of mPIPE.
  *
index 498b6d967138b1fff29659e81813c77e70ff1f58..258990688a5e999557de7f3b5398d7eccdc45ebb 100644 (file)
@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
        /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
-       INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
+       INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
        /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
-       INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
+       INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
-       INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
+       INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
        EVENT_CONSTRAINT_END
 };
 
@@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
        if (c)
                return c;
 
-       c = intel_pebs_constraints(event);
+       c = intel_shared_regs_constraints(cpuc, event);
        if (c)
                return c;
 
-       c = intel_shared_regs_constraints(cpuc, event);
+       c = intel_pebs_constraints(event);
        if (c)
                return c;
 
index 2babb393915e76dbeb8a1b757305b819fddd324b..f0095a76c18211813d711bfa52b82c916190f42d 100644 (file)
@@ -799,7 +799,21 @@ retint_swapgs:             /* return to user-space */
        cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp)      /* R11 == RFLAGS */
        jne opportunistic_sysret_failed
 
-       testq $X86_EFLAGS_RF,%r11               /* sysret can't restore RF */
+       /*
+        * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
+        * restoring TF results in a trap from userspace immediately after
+        * SYSRET.  This would cause an infinite loop whenever #DB happens
+        * with register state that satisfies the opportunistic SYSRET
+        * conditions.  For example, single-stepping this user code:
+        *
+        *           movq $stuck_here,%rcx
+        *           pushfq
+        *           popq %r11
+        *   stuck_here:
+        *
+        * would never get past 'stuck_here'.
+        */
+       testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
        jnz opportunistic_sysret_failed
 
        /* nothing to check for RSP */
index 7ec1d5f8d28339bce0b74191a1d458c6c8e5d5df..25ecd56cefa8f22496153cf29b763c266bd8d91e 100644 (file)
@@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
        { "bx", 8, offsetof(struct pt_regs, bx) },
        { "cx", 8, offsetof(struct pt_regs, cx) },
        { "dx", 8, offsetof(struct pt_regs, dx) },
-       { "si", 8, offsetof(struct pt_regs, dx) },
+       { "si", 8, offsetof(struct pt_regs, si) },
        { "di", 8, offsetof(struct pt_regs, di) },
        { "bp", 8, offsetof(struct pt_regs, bp) },
        { "sp", 8, offsetof(struct pt_regs, sp) },
index bae6c609888e7fdff25784d5bd96fd8dcd5ea88a..86db4bcd7ce52bcb74a5bf42efcd8e7152488cf1 100644 (file)
@@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                },
        },
 
+       /* ASRock */
+       {       /* Handle problems with rebooting on ASRock Q1900DC-ITX */
+               .callback = set_pci_reboot,
+               .ident = "ASRock Q1900DC-ITX",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
+               },
+       },
+
        /* ASUS */
        {       /* Handle problems with rebooting on ASUS P4S800 */
                .callback = set_bios_reboot,
index 3d733ba16f28a2db8f6f4c91c61170a44ca7fd83..6b3790445cbed4c5d2ab7fb31cdc6bb6d3db25f8 100644 (file)
@@ -405,11 +405,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        regs->areg[8] = (unsigned long) &frame->uc;
        regs->threadptr = tp;
 
-       /* Set access mode to USER_DS.  Nomenclature is outdated, but
-        * functionality is used in uaccess.h
-        */
-       set_fs(USER_DS);
-
 #if DEBUG_SIG
        printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
                current->comm, current->pid, signal, frame, regs->pc);
index b8d2725324a6b88391db4c8a5daf7a9c1309dd86..da310a1054299720d1b809d4ac3fd27af02b1e19 100644 (file)
@@ -124,10 +124,10 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 {
        struct iovec iov;
        struct iov_iter i;
+       int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);
 
-       iov.iov_base = ubuf;
-       iov.iov_len = len;
-       iov_iter_init(&i, rq_data_dir(rq), &iov, 1, len);
+       if (unlikely(ret < 0))
+               return ret;
 
        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
 }
index 6ed2cbe5e8c9ae340233b0491255dfed40947590..12600bfffca93f4547e2325eeda9669ff443a7a7 100644 (file)
@@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                     b->physical_block_size);
 
        t->io_min = max(t->io_min, b->io_min);
-       t->io_opt = lcm(t->io_opt, b->io_opt);
+       t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;
@@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                    b->raid_partial_stripes_expensive);
 
        /* Find lowest common alignment_offset */
-       t->alignment_offset = lcm(t->alignment_offset, alignment)
+       t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);
 
        /* Verify that new alignment_offset is on a logical block boundary */
@@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
-               t->discard_alignment = lcm(t->discard_alignment, alignment) %
+               t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }
 
index e1f71c3961934b9ed9adbf01247d196b05801750..55b6f15dac900af77a5ad7038cd98f3133d816a8 100644 (file)
@@ -335,16 +335,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
                struct iov_iter i;
                struct iovec *iov = NULL;
 
-               ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
-                                           0, NULL, &iov);
-               if (ret < 0) {
-                       kfree(iov);
+               ret = import_iovec(rq_data_dir(rq),
+                                  hdr->dxferp, hdr->iovec_count,
+                                  0, &iov, &i);
+               if (ret < 0)
                        goto out_free_cdb;
-               }
 
                /* SG_IO howto says that the shorter of the two wins */
-               iov_iter_init(&i, rq_data_dir(rq), iov, hdr->iovec_count,
-                             min_t(unsigned, ret, hdr->dxfer_len));
+               iov_iter_truncate(&i, hdr->dxfer_len);
 
                ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
                kfree(iov);
index 7f8b7edcadca3f64168e3451e52642a485a5d51f..f22cc56fd1b383f7ef37472808070e8e31120a11 100644 (file)
@@ -358,8 +358,8 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
        npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (WARN_ON(npages == 0))
                return -EINVAL;
-
-       sg_init_table(sgl->sg, npages);
+       /* Add one extra for linking */
+       sg_init_table(sgl->sg, npages + 1);
 
        for (i = 0, len = n; i < npages; i++) {
                int plen = min_t(int, len, PAGE_SIZE - off);
@@ -369,18 +369,26 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
                off = 0;
                len -= plen;
        }
+       sg_mark_end(sgl->sg + npages - 1);
+       sgl->npages = npages;
+
        return n;
 }
 EXPORT_SYMBOL_GPL(af_alg_make_sg);
 
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
+{
+       sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
+       sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
+}
+EXPORT_SYMBOL_GPL(af_alg_link_sg);
+
 void af_alg_free_sg(struct af_alg_sgl *sgl)
 {
        int i;
 
-       i = 0;
-       do {
+       for (i = 0; i < sgl->npages; i++)
                put_page(sgl->pages[i]);
-       } while (!sg_is_last(sgl->sg + (i++)));
 }
 EXPORT_SYMBOL_GPL(af_alg_free_sg);
 
index 01da360bdb5510b78eac0ee43630795c4a011d76..1396ad0787fc6b84ebdd9a9d552ce7eaccf1e698 100644 (file)
@@ -34,8 +34,8 @@ struct hash_ctx {
        struct ahash_request req;
 };
 
-static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
-                       struct msghdr *msg, size_t ignored)
+static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
+                       size_t ignored)
 {
        int limit = ALG_MAX_PAGES * PAGE_SIZE;
        struct sock *sk = sock->sk;
@@ -56,8 +56,8 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
 
        ctx->more = 0;
 
-       while (iov_iter_count(&msg->msg_iter)) {
-               int len = iov_iter_count(&msg->msg_iter);
+       while (msg_data_left(msg)) {
+               int len = msg_data_left(msg);
 
                if (len > limit)
                        len = limit;
@@ -139,8 +139,8 @@ unlock:
        return err ?: size;
 }
 
-static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
-                       struct msghdr *msg, size_t len, int flags)
+static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                       int flags)
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
index 67f612cfed97b3ac31c39b45c63afc97f0a675a9..3acba0a7cd551d2061f80c4e0d30002de792f6b3 100644 (file)
@@ -55,8 +55,8 @@ struct rng_ctx {
        struct crypto_rng *drng;
 };
 
-static int rng_recvmsg(struct kiocb *unused, struct socket *sock,
-                      struct msghdr *msg, size_t len, int flags)
+static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                      int flags)
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
index 0c8a1e5ccadf7d1ca16d9da3bd1042e3ff724177..945075292bc9584e57f4612bb1b7549a8e9e9b22 100644 (file)
@@ -39,6 +39,7 @@ struct skcipher_ctx {
 
        struct af_alg_completion completion;
 
+       atomic_t inflight;
        unsigned used;
 
        unsigned int len;
@@ -49,9 +50,65 @@ struct skcipher_ctx {
        struct ablkcipher_request req;
 };
 
+struct skcipher_async_rsgl {
+       struct af_alg_sgl sgl;
+       struct list_head list;
+};
+
+struct skcipher_async_req {
+       struct kiocb *iocb;
+       struct skcipher_async_rsgl first_sgl;
+       struct list_head list;
+       struct scatterlist *tsg;
+       char iv[];
+};
+
+#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
+       crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
+
+#define GET_REQ_SIZE(ctx) \
+       crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
+
+#define GET_IV_SIZE(ctx) \
+       crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
+
 #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
                      sizeof(struct scatterlist) - 1)
 
+static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
+{
+       struct skcipher_async_rsgl *rsgl, *tmp;
+       struct scatterlist *sgl;
+       struct scatterlist *sg;
+       int i, n;
+
+       list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
+               af_alg_free_sg(&rsgl->sgl);
+               if (rsgl != &sreq->first_sgl)
+                       kfree(rsgl);
+       }
+       sgl = sreq->tsg;
+       n = sg_nents(sgl);
+       for_each_sg(sgl, sg, n, i)
+               put_page(sg_page(sg));
+
+       kfree(sreq->tsg);
+}
+
+static void skcipher_async_cb(struct crypto_async_request *req, int err)
+{
+       struct sock *sk = req->data;
+       struct alg_sock *ask = alg_sk(sk);
+       struct skcipher_ctx *ctx = ask->private;
+       struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+       struct kiocb *iocb = sreq->iocb;
+
+       atomic_dec(&ctx->inflight);
+       skcipher_free_async_sgls(sreq);
+       kfree(req);
+       iocb->ki_complete(iocb, err, err);
+}
+
 static inline int skcipher_sndbuf(struct sock *sk)
 {
        struct alg_sock *ask = alg_sk(sk);
@@ -96,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
        return 0;
 }
 
-static void skcipher_pull_sgl(struct sock *sk, int used)
+static void skcipher_pull_sgl(struct sock *sk, int used, int put)
 {
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
@@ -123,8 +180,8 @@ static void skcipher_pull_sgl(struct sock *sk, int used)
 
                        if (sg[i].length)
                                return;
-
-                       put_page(sg_page(sg + i));
+                       if (put)
+                               put_page(sg_page(sg + i));
                        sg_assign_page(sg + i, NULL);
                }
 
@@ -143,7 +200,7 @@ static void skcipher_free_sgl(struct sock *sk)
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
 
-       skcipher_pull_sgl(sk, ctx->used);
+       skcipher_pull_sgl(sk, ctx->used, 1);
 }
 
 static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
@@ -239,8 +296,8 @@ static void skcipher_data_wakeup(struct sock *sk)
        rcu_read_unlock();
 }
 
-static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
-                           struct msghdr *msg, size_t size)
+static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
+                           size_t size)
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
@@ -424,8 +481,153 @@ unlock:
        return err ?: size;
 }
 
-static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
-                           struct msghdr *msg, size_t ignored, int flags)
+static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
+{
+       struct skcipher_sg_list *sgl;
+       struct scatterlist *sg;
+       int nents = 0;
+
+       list_for_each_entry(sgl, &ctx->tsgl, list) {
+               sg = sgl->sg;
+
+               while (!sg->length)
+                       sg++;
+
+               nents += sg_nents(sg);
+       }
+       return nents;
+}
+
+static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
+                                 int flags)
+{
+       struct sock *sk = sock->sk;
+       struct alg_sock *ask = alg_sk(sk);
+       struct skcipher_ctx *ctx = ask->private;
+       struct skcipher_sg_list *sgl;
+       struct scatterlist *sg;
+       struct skcipher_async_req *sreq;
+       struct ablkcipher_request *req;
+       struct skcipher_async_rsgl *last_rsgl = NULL;
+       unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
+       unsigned int reqlen = sizeof(struct skcipher_async_req) +
+                               GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+       int err = -ENOMEM;
+       bool mark = false;
+
+       lock_sock(sk);
+       req = kmalloc(reqlen, GFP_KERNEL);
+       if (unlikely(!req))
+               goto unlock;
+
+       sreq = GET_SREQ(req, ctx);
+       sreq->iocb = msg->msg_iocb;
+       memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
+       INIT_LIST_HEAD(&sreq->list);
+       sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
+       if (unlikely(!sreq->tsg)) {
+               kfree(req);
+               goto unlock;
+       }
+       sg_init_table(sreq->tsg, tx_nents);
+       memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
+       ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
+       ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                       skcipher_async_cb, sk);
+
+       while (iov_iter_count(&msg->msg_iter)) {
+               struct skcipher_async_rsgl *rsgl;
+               int used;
+
+               if (!ctx->used) {
+                       err = skcipher_wait_for_data(sk, flags);
+                       if (err)
+                               goto free;
+               }
+               sgl = list_first_entry(&ctx->tsgl,
+                                      struct skcipher_sg_list, list);
+               sg = sgl->sg;
+
+               while (!sg->length)
+                       sg++;
+
+               used = min_t(unsigned long, ctx->used,
+                            iov_iter_count(&msg->msg_iter));
+               used = min_t(unsigned long, used, sg->length);
+
+               if (txbufs == tx_nents) {
+                       struct scatterlist *tmp;
+                       int x;
+                       /* Ran out of tx slots in async request
+                        * need to expand */
+                       tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
+                                     GFP_KERNEL);
+                       if (!tmp)
+                               goto free;
+
+                       sg_init_table(tmp, tx_nents * 2);
+                       for (x = 0; x < tx_nents; x++)
+                               sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
+                                           sreq->tsg[x].length,
+                                           sreq->tsg[x].offset);
+                       kfree(sreq->tsg);
+                       sreq->tsg = tmp;
+                       tx_nents *= 2;
+                       mark = true;
+               }
+               /* Need to take over the tx sgl from ctx
+                * to the asynch req - these sgls will be freed later */
+               sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
+                           sg->offset);
+
+               if (list_empty(&sreq->list)) {
+                       rsgl = &sreq->first_sgl;
+                       list_add_tail(&rsgl->list, &sreq->list);
+               } else {
+                       rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
+                       if (!rsgl) {
+                               err = -ENOMEM;
+                               goto free;
+                       }
+                       list_add_tail(&rsgl->list, &sreq->list);
+               }
+
+               used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
+               err = used;
+               if (used < 0)
+                       goto free;
+               if (last_rsgl)
+                       af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
+
+               last_rsgl = rsgl;
+               len += used;
+               skcipher_pull_sgl(sk, used, 0);
+               iov_iter_advance(&msg->msg_iter, used);
+       }
+
+       if (mark)
+               sg_mark_end(sreq->tsg + txbufs - 1);
+
+       ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
+                                    len, sreq->iv);
+       err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
+                        crypto_ablkcipher_decrypt(req);
+       if (err == -EINPROGRESS) {
+               atomic_inc(&ctx->inflight);
+               err = -EIOCBQUEUED;
+               goto unlock;
+       }
+free:
+       skcipher_free_async_sgls(sreq);
+       kfree(req);
+unlock:
+       skcipher_wmem_wakeup(sk);
+       release_sock(sk);
+       return err;
+}
+
+static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
+                                int flags)
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
@@ -439,7 +641,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
        long copied = 0;
 
        lock_sock(sk);
-       while (iov_iter_count(&msg->msg_iter)) {
+       while (msg_data_left(msg)) {
                sgl = list_first_entry(&ctx->tsgl,
                                       struct skcipher_sg_list, list);
                sg = sgl->sg;
@@ -453,7 +655,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
                                goto unlock;
                }
 
-               used = min_t(unsigned long, ctx->used, iov_iter_count(&msg->msg_iter));
+               used = min_t(unsigned long, ctx->used, msg_data_left(msg));
 
                used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
                err = used;
@@ -484,7 +686,7 @@ free:
                        goto unlock;
 
                copied += used;
-               skcipher_pull_sgl(sk, used);
+               skcipher_pull_sgl(sk, used, 1);
                iov_iter_advance(&msg->msg_iter, used);
        }
 
@@ -497,6 +699,13 @@ unlock:
        return copied ?: err;
 }
 
+static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+                           size_t ignored, int flags)
+{
+       return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
+               skcipher_recvmsg_async(sock, msg, flags) :
+               skcipher_recvmsg_sync(sock, msg, flags);
+}
 
 static unsigned int skcipher_poll(struct file *file, struct socket *sock,
                                  poll_table *wait)
@@ -555,12 +764,25 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
        return crypto_ablkcipher_setkey(private, key, keylen);
 }
 
+static void skcipher_wait(struct sock *sk)
+{
+       struct alg_sock *ask = alg_sk(sk);
+       struct skcipher_ctx *ctx = ask->private;
+       int ctr = 0;
+
+       while (atomic_read(&ctx->inflight) && ctr++ < 100)
+               msleep(100);
+}
+
 static void skcipher_sock_destruct(struct sock *sk)
 {
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
 
+       if (atomic_read(&ctx->inflight))
+               skcipher_wait(sk);
+
        skcipher_free_sgl(sk);
        sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
        sock_kfree_s(sk, ctx, ctx->len);
@@ -592,6 +814,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
+       atomic_set(&ctx->inflight, 0);
        af_alg_init_completion(&ctx->completion);
 
        ask->private = ctx;
index b7e1cc0a97c86aac6c47cb5d459f63313a9beb45..ddc4ceb85fc560d4c0295f86e44f6a6e7457995b 100644 (file)
@@ -73,9 +73,6 @@
 #undef GENERAL_DEBUG
 #undef EXTRA_DEBUG
 
-#undef NS_USE_DESTRUCTORS      /* For now keep this undefined unless you know
-                                  you're going to use only raw ATM */
-
 /* Do not touch these */
 
 #ifdef TX_DEBUG
@@ -138,11 +135,6 @@ static void process_tsq(ns_dev * card);
 static void drain_scq(ns_dev * card, scq_info * scq, int pos);
 static void process_rsq(ns_dev * card);
 static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
-#ifdef NS_USE_DESTRUCTORS
-static void ns_sb_destructor(struct sk_buff *sb);
-static void ns_lb_destructor(struct sk_buff *lb);
-static void ns_hb_destructor(struct sk_buff *hb);
-#endif /* NS_USE_DESTRUCTORS */
 static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
 static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
 static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
@@ -2169,9 +2161,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                        } else {
                                skb_put(skb, len);
                                dequeue_sm_buf(card, skb);
-#ifdef NS_USE_DESTRUCTORS
-                               skb->destructor = ns_sb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
                                ATM_SKB(skb)->vcc = vcc;
                                __net_timestamp(skb);
                                vcc->push(vcc, skb);
@@ -2190,9 +2179,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                                } else {
                                        skb_put(sb, len);
                                        dequeue_sm_buf(card, sb);
-#ifdef NS_USE_DESTRUCTORS
-                                       sb->destructor = ns_sb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
                                        ATM_SKB(sb)->vcc = vcc;
                                        __net_timestamp(sb);
                                        vcc->push(vcc, sb);
@@ -2208,9 +2194,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                                        atomic_inc(&vcc->stats->rx_drop);
                                } else {
                                        dequeue_lg_buf(card, skb);
-#ifdef NS_USE_DESTRUCTORS
-                                       skb->destructor = ns_lb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
                                        skb_push(skb, NS_SMBUFSIZE);
                                        skb_copy_from_linear_data(sb, skb->data,
                                                                  NS_SMBUFSIZE);
@@ -2322,9 +2305,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                                             card->index);
 #endif /* EXTRA_DEBUG */
                                ATM_SKB(hb)->vcc = vcc;
-#ifdef NS_USE_DESTRUCTORS
-                               hb->destructor = ns_hb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
                                __net_timestamp(hb);
                                vcc->push(vcc, hb);
                                atomic_inc(&vcc->stats->rx);
@@ -2337,68 +2317,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 
 }
 
-#ifdef NS_USE_DESTRUCTORS
-
-static void ns_sb_destructor(struct sk_buff *sb)
-{
-       ns_dev *card;
-       u32 stat;
-
-       card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
-       stat = readl(card->membase + STAT);
-       card->sbfqc = ns_stat_sfbqc_get(stat);
-       card->lbfqc = ns_stat_lfbqc_get(stat);
-
-       do {
-               sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
-               if (sb == NULL)
-                       break;
-               NS_PRV_BUFTYPE(sb) = BUF_SM;
-               skb_queue_tail(&card->sbpool.queue, sb);
-               skb_reserve(sb, NS_AAL0_HEADER);
-               push_rxbufs(card, sb);
-       } while (card->sbfqc < card->sbnr.min);
-}
-
-static void ns_lb_destructor(struct sk_buff *lb)
-{
-       ns_dev *card;
-       u32 stat;
-
-       card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
-       stat = readl(card->membase + STAT);
-       card->sbfqc = ns_stat_sfbqc_get(stat);
-       card->lbfqc = ns_stat_lfbqc_get(stat);
-
-       do {
-               lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
-               if (lb == NULL)
-                       break;
-               NS_PRV_BUFTYPE(lb) = BUF_LG;
-               skb_queue_tail(&card->lbpool.queue, lb);
-               skb_reserve(lb, NS_SMBUFSIZE);
-               push_rxbufs(card, lb);
-       } while (card->lbfqc < card->lbnr.min);
-}
-
-static void ns_hb_destructor(struct sk_buff *hb)
-{
-       ns_dev *card;
-
-       card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
-
-       while (card->hbpool.count < card->hbnr.init) {
-               hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
-               if (hb == NULL)
-                       break;
-               NS_PRV_BUFTYPE(hb) = BUF_NONE;
-               skb_queue_tail(&card->hbpool.queue, hb);
-               card->hbpool.count++;
-       }
-}
-
-#endif /* NS_USE_DESTRUCTORS */
-
 static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
 {
        if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
@@ -2427,9 +2345,6 @@ static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
 static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
 {
        skb_unlink(sb, &card->sbpool.queue);
-#ifdef NS_USE_DESTRUCTORS
-       if (card->sbfqc < card->sbnr.min)
-#else
        if (card->sbfqc < card->sbnr.init) {
                struct sk_buff *new_sb;
                if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
@@ -2440,7 +2355,6 @@ static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
                }
        }
        if (card->sbfqc < card->sbnr.init)
-#endif /* NS_USE_DESTRUCTORS */
        {
                struct sk_buff *new_sb;
                if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
@@ -2455,9 +2369,6 @@ static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
 static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
 {
        skb_unlink(lb, &card->lbpool.queue);
-#ifdef NS_USE_DESTRUCTORS
-       if (card->lbfqc < card->lbnr.min)
-#else
        if (card->lbfqc < card->lbnr.init) {
                struct sk_buff *new_lb;
                if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
@@ -2468,7 +2379,6 @@ static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
                }
        }
        if (card->lbfqc < card->lbnr.init)
-#endif /* NS_USE_DESTRUCTORS */
        {
                struct sk_buff *new_lb;
                if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
index 0ee48be23837e2e6655f137d047a27dc7c3df60d..fc6ffcfa80618efded04b72b1501ca505683a3b0 100644 (file)
@@ -26,6 +26,7 @@ config BCMA_HOST_PCI_POSSIBLE
 config BCMA_HOST_PCI
        bool "Support for BCMA on PCI-host bus"
        depends on BCMA_HOST_PCI_POSSIBLE
+       select BCMA_DRIVER_PCI
        default y
 
 config BCMA_DRIVER_PCI_HOSTMODE
@@ -44,6 +45,22 @@ config BCMA_HOST_SOC
 
          If unsure, say N
 
+config BCMA_DRIVER_PCI
+       bool "BCMA Broadcom PCI core driver"
+       depends on BCMA && PCI
+       default y
+       help
+         BCMA bus may have many versions of PCIe core. This driver
+         supports:
+         1) PCIe core working in clientmode
+         2) PCIe Gen 2 clientmode core
+
+         In general PCIe (Gen 2) clientmode core is required on PCIe
+         hosted buses. It's responsible for initialization and basic
+         hardware management.
+         This driver is also prerequisite for a hostmode PCIe core
+         support.
+
 config BCMA_DRIVER_MIPS
        bool "BCMA Broadcom MIPS core driver"
        depends on BCMA && MIPS
index 838b4b9d352ffc1d6c7790426bde8502712cc8dc..f32af9b76bcd2aaa7e0da61e26b2e33174803f7f 100644 (file)
@@ -3,8 +3,8 @@ bcma-y                                  += driver_chipcommon.o driver_chipcommon_pmu.o
 bcma-y                                 += driver_chipcommon_b.o
 bcma-$(CONFIG_BCMA_SFLASH)             += driver_chipcommon_sflash.o
 bcma-$(CONFIG_BCMA_NFLASH)             += driver_chipcommon_nflash.o
-bcma-y                                 += driver_pci.o
-bcma-y                                 += driver_pcie2.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI)         += driver_pci.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI)         += driver_pcie2.o
 bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE)        += driver_pci_host.o
 bcma-$(CONFIG_BCMA_DRIVER_MIPS)                += driver_mips.o
 bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN)    += driver_gmac_cmn.o
index ac6c5fca906d015d585f35fd9b57dc6f68d9d70b..15f2b2e242ea76b9ed882383f51a976a72d7b325 100644 (file)
@@ -26,6 +26,7 @@ bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
                     int timeout);
 void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core);
 void bcma_init_bus(struct bcma_bus *bus);
+void bcma_unregister_cores(struct bcma_bus *bus);
 int bcma_bus_register(struct bcma_bus *bus);
 void bcma_bus_unregister(struct bcma_bus *bus);
 int __init bcma_bus_early_register(struct bcma_bus *bus);
@@ -42,6 +43,9 @@ int bcma_bus_scan(struct bcma_bus *bus);
 int bcma_sprom_get(struct bcma_bus *bus);
 
 /* driver_chipcommon.c */
+void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
+void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
+void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
 #ifdef CONFIG_BCMA_DRIVER_MIPS
 void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
 extern struct platform_device bcma_pflash_dev;
@@ -52,6 +56,8 @@ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
 void bcma_core_chipcommon_b_free(struct bcma_drv_cc_b *ccb);
 
 /* driver_chipcommon_pmu.c */
+void bcma_pmu_early_init(struct bcma_drv_cc *cc);
+void bcma_pmu_init(struct bcma_drv_cc *cc);
 u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc);
 u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc);
 
@@ -100,7 +106,35 @@ static inline void __exit bcma_host_soc_unregister_driver(void)
 #endif /* CONFIG_BCMA_HOST_SOC && CONFIG_OF */
 
 /* driver_pci.c */
+#ifdef CONFIG_BCMA_DRIVER_PCI
 u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
+void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
+void bcma_core_pci_init(struct bcma_drv_pci *pc);
+void bcma_core_pci_up(struct bcma_drv_pci *pc);
+void bcma_core_pci_down(struct bcma_drv_pci *pc);
+#else
+static inline void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
+{
+       WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+static inline void bcma_core_pci_init(struct bcma_drv_pci *pc)
+{
+       /* Initialization is required for PCI hosted bus */
+       WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+#endif
+
+/* driver_pcie2.c */
+#ifdef CONFIG_BCMA_DRIVER_PCI
+void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
+void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2);
+#else
+static inline void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
+{
+       /* Initialization is required for PCI hosted bus */
+       WARN_ON(pcie2->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+#endif
 
 extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
 
@@ -117,6 +151,39 @@ static inline void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
 }
 #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
 
+/**************************************************
+ * driver_mips.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+unsigned int bcma_core_mips_irq(struct bcma_device *dev);
+void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
+void bcma_core_mips_init(struct bcma_drv_mips *mcore);
+#else
+static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
+{
+       return 0;
+}
+static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+{
+}
+static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore)
+{
+}
+#endif
+
+/**************************************************
+ * driver_gmac_cmn.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
+void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
+#else
+static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
+{
+}
+#endif
+
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 /* driver_gpio.c */
 int bcma_gpio_init(struct bcma_drv_cc *cc);
index 598a6cd9028a70a239f283101d8f65a33e9fdf77..74ccb02e0f10c8c52a1db7ab3948f5d697b3cddc 100644 (file)
@@ -17,6 +17,8 @@
 
 #include "bcma_private.h"
 
+#define BCMA_GPIO_MAX_PINS     32
+
 static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip)
 {
        return container_of(chip, struct bcma_drv_cc, gpio);
@@ -76,7 +78,7 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
        bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
 }
 
-#if IS_BUILTIN(CONFIG_BCM47XX)
+#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
 static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
 {
        struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
@@ -204,6 +206,7 @@ static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
 
 int bcma_gpio_init(struct bcma_drv_cc *cc)
 {
+       struct bcma_bus *bus = cc->core->bus;
        struct gpio_chip *chip = &cc->gpio;
        int err;
 
@@ -215,14 +218,14 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        chip->set               = bcma_gpio_set_value;
        chip->direction_input   = bcma_gpio_direction_input;
        chip->direction_output  = bcma_gpio_direction_output;
-#if IS_BUILTIN(CONFIG_BCM47XX)
+#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
        chip->to_irq            = bcma_gpio_to_irq;
 #endif
 #if IS_BUILTIN(CONFIG_OF)
        if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
                chip->of_node   = cc->core->dev.of_node;
 #endif
-       switch (cc->core->bus->chipinfo.id) {
+       switch (bus->chipinfo.id) {
        case BCMA_CHIP_ID_BCM5357:
        case BCMA_CHIP_ID_BCM53572:
                chip->ngpio     = 32;
@@ -231,13 +234,17 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
                chip->ngpio     = 16;
        }
 
-       /* There is just one SoC in one device and its GPIO addresses should be
-        * deterministic to address them more easily. The other buses could get
-        * a random base number. */
-       if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
-               chip->base              = 0;
-       else
-               chip->base              = -1;
+       /*
+        * On MIPS we register GPIO devices (LEDs, buttons) using absolute GPIO
+        * pin numbers. We don't have Device Tree there and we can't really use
+        * relative (per chip) numbers.
+        * So let's use predictable base for BCM47XX and "random" for all other.
+        */
+#if IS_BUILTIN(CONFIG_BCM47XX)
+       chip->base              = bus->num * BCMA_GPIO_MAX_PINS;
+#else
+       chip->base              = -1;
+#endif
 
        err = bcma_gpio_irq_domain_init(cc);
        if (err)
index 786666488a2dc2f7d0d076fa44096656501493a8..f499a469e66d08d7dfcb50e123c82fc920f32ea9 100644 (file)
@@ -282,39 +282,6 @@ void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
 
-int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
-                         bool enable)
-{
-       struct pci_dev *pdev;
-       u32 coremask, tmp;
-       int err = 0;
-
-       if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
-               /* This bcma device is not on a PCI host-bus. So the IRQs are
-                * not routed through the PCI core.
-                * So we must not enable routing through the PCI core. */
-               goto out;
-       }
-
-       pdev = pc->core->bus->host_pci;
-
-       err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
-       if (err)
-               goto out;
-
-       coremask = BIT(core->core_index) << 8;
-       if (enable)
-               tmp |= coremask;
-       else
-               tmp &= ~coremask;
-
-       err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
-
-out:
-       return err;
-}
-EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
-
 static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
 {
        u32 w;
@@ -328,28 +295,12 @@ static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
        bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
 }
 
-void bcma_core_pci_up(struct bcma_bus *bus)
+void bcma_core_pci_up(struct bcma_drv_pci *pc)
 {
-       struct bcma_drv_pci *pc;
-
-       if (bus->hosttype != BCMA_HOSTTYPE_PCI)
-               return;
-
-       pc = &bus->drv_pci[0];
-
        bcma_core_pci_extend_L1timer(pc, true);
 }
-EXPORT_SYMBOL_GPL(bcma_core_pci_up);
 
-void bcma_core_pci_down(struct bcma_bus *bus)
+void bcma_core_pci_down(struct bcma_drv_pci *pc)
 {
-       struct bcma_drv_pci *pc;
-
-       if (bus->hosttype != BCMA_HOSTTYPE_PCI)
-               return;
-
-       pc = &bus->drv_pci[0];
-
        bcma_core_pci_extend_L1timer(pc, false);
 }
-EXPORT_SYMBOL_GPL(bcma_core_pci_down);
index c8a6b741967b390e20470f15d55d493fa9bc8ce8..c42cec7c7ecc0a88f649b315c8c8616a0439c2f8 100644 (file)
@@ -11,6 +11,7 @@
 
 #include "bcma_private.h"
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/bcma/bcma.h>
 #include <asm/paccess.h>
index e4be537b0c66997700f9e0f4ba7a3a1adfff574e..b1a6e327cb23d44d10a2952d9e0aa5f356528b24 100644 (file)
@@ -10,6 +10,7 @@
 
 #include "bcma_private.h"
 #include <linux/bcma/bcma.h>
+#include <linux/pci.h>
 
 /**************************************************
  * R/W ops.
@@ -156,14 +157,23 @@ static void pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 *pcie2)
 
 void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
 {
-       struct bcma_chipinfo *ci = &pcie2->core->bus->chipinfo;
+       struct bcma_bus *bus = pcie2->core->bus;
+       struct bcma_chipinfo *ci = &bus->chipinfo;
        u32 tmp;
 
        tmp = pcie2_read32(pcie2, BCMA_CORE_PCIE2_SPROM(54));
        if ((tmp & 0xe) >> 1 == 2)
                bcma_core_pcie2_cfg_write(pcie2, 0x4e0, 0x17);
 
-       /* TODO: Do we need pcie_reqsize? */
+       switch (bus->chipinfo.id) {
+       case BCMA_CHIP_ID_BCM4360:
+       case BCMA_CHIP_ID_BCM4352:
+               pcie2->reqsize = 1024;
+               break;
+       default:
+               pcie2->reqsize = 128;
+               break;
+       }
 
        if (ci->id == BCMA_CHIP_ID_BCM4360 && ci->rev > 3)
                bcma_core_pcie2_war_delay_perst_enab(pcie2, true);
@@ -173,3 +183,18 @@ void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
        pciedev_crwlpciegen2_180(pcie2);
        pciedev_crwlpciegen2_182(pcie2);
 }
+
+/**************************************************
+ * Runtime ops.
+ **************************************************/
+
+void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2)
+{
+       struct bcma_bus *bus = pcie2->core->bus;
+       struct pci_dev *dev = bus->host_pci;
+       int err;
+
+       err = pcie_set_readrq(dev, pcie2->reqsize);
+       if (err)
+               bcma_err(bus, "Error setting PCI_EXP_DEVCTL_READRQ: %d\n", err);
+}
index 53c6a8a58859bb8b252b9921eb2c96935d099f7f..0856189c065fd57826df7edab2dba3217d5e283c 100644 (file)
@@ -213,16 +213,26 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
        /* Initialize struct, detect chip */
        bcma_init_bus(bus);
 
+       /* Scan bus to find out generation of PCIe core */
+       err = bcma_bus_scan(bus);
+       if (err)
+               goto err_pci_unmap_mmio;
+
+       if (bcma_find_core(bus, BCMA_CORE_PCIE2))
+               bus->host_is_pcie2 = true;
+
        /* Register */
        err = bcma_bus_register(bus);
        if (err)
-               goto err_pci_unmap_mmio;
+               goto err_unregister_cores;
 
        pci_set_drvdata(dev, bus);
 
 out:
        return err;
 
+err_unregister_cores:
+       bcma_unregister_cores(bus);
 err_pci_unmap_mmio:
        pci_iounmap(dev, bus->mmio);
 err_pci_release_regions:
@@ -283,9 +293,12 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },  /* 0xa8db, BCM43217 (sic!) */
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) },  /* 0xa8dc */
@@ -310,3 +323,65 @@ void __exit bcma_host_pci_exit(void)
 {
        pci_unregister_driver(&bcma_pci_bridge_driver);
 }
+
+/**************************************************
+ * Runtime ops for drivers.
+ **************************************************/
+
+/* See also pcicore_up */
+void bcma_host_pci_up(struct bcma_bus *bus)
+{
+       if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+               return;
+
+       if (bus->host_is_pcie2)
+               bcma_core_pcie2_up(&bus->drv_pcie2);
+       else
+               bcma_core_pci_up(&bus->drv_pci[0]);
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_up);
+
+/* See also pcicore_down */
+void bcma_host_pci_down(struct bcma_bus *bus)
+{
+       if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+               return;
+
+       if (!bus->host_is_pcie2)
+               bcma_core_pci_down(&bus->drv_pci[0]);
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_down);
+
+/* See also si_pci_setup */
+int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
+                         bool enable)
+{
+       struct pci_dev *pdev;
+       u32 coremask, tmp;
+       int err = 0;
+
+       if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
+               /* This bcma device is not on a PCI host-bus. So the IRQs are
+                * not routed through the PCI core.
+                * So we must not enable routing through the PCI core. */
+               goto out;
+       }
+
+       pdev = bus->host_pci;
+
+       err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
+       if (err)
+               goto out;
+
+       coremask = BIT(core->core_index) << 8;
+       if (enable)
+               tmp |= coremask;
+       else
+               tmp &= ~coremask;
+
+       err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
+
+out:
+       return err;
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);
index 38bde6eab8a41867d92de40f7083e411efe67aeb..9635f1033ce5c46e7aba2863fa04a8bc86421aa9 100644 (file)
@@ -363,7 +363,7 @@ static int bcma_register_devices(struct bcma_bus *bus)
        return 0;
 }
 
-static void bcma_unregister_cores(struct bcma_bus *bus)
+void bcma_unregister_cores(struct bcma_bus *bus)
 {
        struct bcma_device *core, *tmp;
 
index d1f168b73634321dadb6c57571c0cd8a9de8304b..c4fd1e45ce1e82a8f303aacda953539dba99b3be 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/sysfs.h>
 #include <linux/miscdevice.h>
 #include <linux/falloc.h>
+#include <linux/uio.h>
 #include "loop.h"
 
 #include <asm/uaccess.h>
@@ -229,13 +230,14 @@ lo_do_transfer(struct loop_device *lo, int cmd,
 static int __do_lo_send_write(struct file *file,
                u8 *buf, const int len, loff_t pos)
 {
+       struct kvec kvec = {.iov_base = buf, .iov_len = len};
+       struct iov_iter from;
        ssize_t bw;
-       mm_segment_t old_fs = get_fs();
+
+       iov_iter_kvec(&from, ITER_KVEC | WRITE, &kvec, 1, len);
 
        file_start_write(file);
-       set_fs(get_ds());
-       bw = file->f_op->write(file, buf, len, &pos);
-       set_fs(old_fs);
+       bw = vfs_iter_write(file, &from, &pos);
        file_end_write(file);
        if (likely(bw == len))
                return 0;
@@ -767,7 +769,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
                goto out_putf;
 
        if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
-           !file->f_op->write)
+           !file->f_op->write_iter)
                lo_flags |= LO_FLAGS_READ_ONLY;
 
        lo_blocksize = S_ISBLK(inode->i_mode) ?
index de4c8499cbac958f0100f0004e38884839281729..288547a3c566753d146e28924c7d3b023e8c0d3b 100644 (file)
@@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = {
        /* Atheros AR3011 with sflash firmware*/
        { USB_DEVICE(0x0489, 0xE027) },
        { USB_DEVICE(0x0489, 0xE03D) },
+       { USB_DEVICE(0x04F2, 0xAFF1) },
        { USB_DEVICE(0x0930, 0x0215) },
        { USB_DEVICE(0x0CF3, 0x3002) },
        { USB_DEVICE(0x0CF3, 0xE019) },
index e75f8ee2512cb08ea17579c6d853780a978b3ada..086f0ec89580627d4c95516c0ef912265445afef 100644 (file)
@@ -111,6 +111,7 @@ struct btmrvl_private {
 
 /* Vendor specific Bluetooth commands */
 #define BT_CMD_PSCAN_WIN_REPORT_ENABLE 0xFC03
+#define BT_CMD_ROUTE_SCO_TO_HOST       0xFC1D
 #define BT_CMD_SET_BDADDR              0xFC22
 #define BT_CMD_AUTO_SLEEP_MODE         0xFC23
 #define BT_CMD_HOST_SLEEP_CONFIG       0xFC59
index 413597789c61d61686737ceb0b7c8501427c3b42..de05deb444ce260a13be75af8b96eb8c8922794a 100644 (file)
@@ -230,6 +230,18 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd)
 }
 EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
 
+static int btmrvl_enable_sco_routing_to_host(struct btmrvl_private *priv)
+{
+       int ret;
+       u8 subcmd = 0;
+
+       ret = btmrvl_send_sync_cmd(priv, BT_CMD_ROUTE_SCO_TO_HOST, &subcmd, 1);
+       if (ret)
+               BT_ERR("BT_CMD_ROUTE_SCO_TO_HOST command failed: %#x", ret);
+
+       return ret;
+}
+
 int btmrvl_pscan_window_reporting(struct btmrvl_private *priv, u8 subcmd)
 {
        struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
@@ -558,6 +570,8 @@ static int btmrvl_setup(struct hci_dev *hdev)
 
        btmrvl_check_device_tree(priv);
 
+       btmrvl_enable_sco_routing_to_host(priv);
+
        btmrvl_pscan_window_reporting(priv, 0x01);
 
        priv->btmrvl_dev.psmode = 1;
index 8bfc4c2bba87b61f46dfbb4778463f393226f735..6e4ff16e487b06229346a9284e867962f70f0d5c 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/firmware.h>
+#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -52,6 +53,8 @@ static struct usb_driver btusb_driver;
 #define BTUSB_SWAVE            0x1000
 #define BTUSB_INTEL_NEW                0x2000
 #define BTUSB_AMP              0x4000
+#define BTUSB_QCA_ROME         0x8000
+#define BTUSB_BCM_APPLE                0x10000
 
 static const struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth USB device */
@@ -61,7 +64,8 @@ static const struct usb_device_id btusb_table[] = {
        { USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
 
        /* Apple-specific (Broadcom) devices */
-       { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01),
+         .driver_info = BTUSB_BCM_APPLE },
 
        /* MediaTek MT76x0E */
        { USB_DEVICE(0x0e8d, 0x763f) },
@@ -107,13 +111,7 @@ static const struct usb_device_id btusb_table[] = {
        { USB_DEVICE(0x0c10, 0x0000) },
 
        /* Broadcom BCM20702A0 */
-       { USB_DEVICE(0x0489, 0xe042) },
-       { USB_DEVICE(0x04ca, 0x2003) },
-       { USB_DEVICE(0x0b05, 0x17b5) },
-       { USB_DEVICE(0x0b05, 0x17cb) },
        { USB_DEVICE(0x413c, 0x8197) },
-       { USB_DEVICE(0x13d3, 0x3404),
-         .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* Broadcom BCM20702B0 (Dynex/Insignia) */
        { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
@@ -135,10 +133,12 @@ static const struct usb_device_id btusb_table[] = {
          .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* Belkin F8065bf - Broadcom based */
-       { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01),
+         .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* IMC Networks - Broadcom based */
-       { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
+         .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* Intel Bluetooth USB Bootloader (RAM module) */
        { USB_DEVICE(0x8087, 0x0a5a),
@@ -159,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = {
        /* Atheros 3011 with sflash firmware */
        { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+       { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
@@ -212,6 +213,10 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
+       /* QCA ROME chipset */
+       { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
+       { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
+
        /* Broadcom BCM2035 */
        { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
        { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -337,17 +342,9 @@ struct btusb_data {
 
        int (*recv_event)(struct hci_dev *hdev, struct sk_buff *skb);
        int (*recv_bulk)(struct btusb_data *data, void *buffer, int count);
-};
 
-static int btusb_wait_on_bit_timeout(void *word, int bit, unsigned long timeout,
-                                    unsigned mode)
-{
-       might_sleep();
-       if (!test_bit(bit, word))
-               return 0;
-       return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout,
-                                              mode, timeout);
-}
+       int (*setup_on_usb)(struct hci_dev *hdev);
+};
 
 static inline void btusb_free_frags(struct btusb_data *data)
 {
@@ -888,6 +885,15 @@ static int btusb_open(struct hci_dev *hdev)
 
        BT_DBG("%s", hdev->name);
 
+       /* Patching USB firmware files prior to starting any URBs of HCI path
+        * It is more safe to use USB bulk channel for downloading USB patch
+        */
+       if (data->setup_on_usb) {
+               err = data->setup_on_usb(hdev);
+               if (err <0)
+                       return err;
+       }
+
        err = usb_autopm_get_interface(data->intf);
        if (err < 0)
                return err;
@@ -1263,6 +1269,28 @@ static void btusb_waker(struct work_struct *work)
        usb_autopm_put_interface(data->intf);
 }
 
+static struct sk_buff *btusb_read_local_version(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return skb;
+       }
+
+       if (skb->len != sizeof(struct hci_rp_read_local_version)) {
+               BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
+                      hdev->name);
+               kfree_skb(skb);
+               return ERR_PTR(-EIO);
+       }
+
+       return skb;
+}
+
 static int btusb_setup_bcm92035(struct hci_dev *hdev)
 {
        struct sk_buff *skb;
@@ -1287,12 +1315,9 @@ static int btusb_setup_csr(struct hci_dev *hdev)
 
        BT_DBG("%s", hdev->name);
 
-       skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
-                            HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               BT_ERR("Reading local version failed (%ld)", -PTR_ERR(skb));
+       skb = btusb_read_local_version(hdev);
+       if (IS_ERR(skb))
                return -PTR_ERR(skb);
-       }
 
        rp = (struct hci_rp_read_local_version *)skb->data;
 
@@ -2197,9 +2222,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
         * and thus just timeout if that happens and fail the setup
         * of this device.
         */
-       err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
-                                       msecs_to_jiffies(5000),
-                                       TASK_INTERRUPTIBLE);
+       err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
+                                 TASK_INTERRUPTIBLE,
+                                 msecs_to_jiffies(5000));
        if (err == 1) {
                BT_ERR("%s: Firmware loading interrupted", hdev->name);
                err = -EINTR;
@@ -2250,9 +2275,9 @@ done:
         */
        BT_INFO("%s: Waiting for device to boot", hdev->name);
 
-       err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
-                                       msecs_to_jiffies(1000),
-                                       TASK_INTERRUPTIBLE);
+       err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
+                                 TASK_INTERRUPTIBLE,
+                                 msecs_to_jiffies(1000));
 
        if (err == 1) {
                BT_ERR("%s: Device boot interrupted", hdev->name);
@@ -2332,6 +2357,27 @@ static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr)
        return 0;
 }
 
+static int btusb_shutdown_intel(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+       long ret;
+
+       /* Some platforms have an issue with BT LED when the interface is
+        * down or BT radio is turned off, which takes 5 seconds to BT LED
+        * goes off. This command turns off the BT LED immediately.
+        */
+       skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               ret = PTR_ERR(skb);
+               BT_ERR("%s: turning off Intel device LED failed (%ld)",
+                      hdev->name, ret);
+               return ret;
+       }
+       kfree_skb(skb);
+
+       return 0;
+}
+
 static int btusb_set_bdaddr_marvell(struct hci_dev *hdev,
                                    const bdaddr_t *bdaddr)
 {
@@ -2355,6 +2401,23 @@ static int btusb_set_bdaddr_marvell(struct hci_dev *hdev,
        return 0;
 }
 
+static const struct {
+       u16 subver;
+       const char *name;
+} bcm_subver_table[] = {
+       { 0x210b, "BCM43142A0"  },      /* 001.001.011 */
+       { 0x2112, "BCM4314A0"   },      /* 001.001.018 */
+       { 0x2118, "BCM20702A0"  },      /* 001.001.024 */
+       { 0x2126, "BCM4335A0"   },      /* 001.001.038 */
+       { 0x220e, "BCM20702A1"  },      /* 001.002.014 */
+       { 0x230f, "BCM4354A2"   },      /* 001.003.015 */
+       { 0x4106, "BCM4335B0"   },      /* 002.001.006 */
+       { 0x410e, "BCM20702B0"  },      /* 002.001.014 */
+       { 0x6109, "BCM4335C0"   },      /* 003.001.009 */
+       { 0x610c, "BCM4354"     },      /* 003.001.012 */
+       { }
+};
+
 #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
 
 static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
@@ -2367,56 +2430,74 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
        size_t fw_size;
        const struct hci_command_hdr *cmd;
        const u8 *cmd_param;
-       u16 opcode;
+       u16 opcode, subver, rev;
+       const char *hw_name = NULL;
        struct sk_buff *skb;
        struct hci_rp_read_local_version *ver;
        struct hci_rp_read_bd_addr *bda;
        long ret;
-
-       snprintf(fw_name, sizeof(fw_name), "brcm/%s-%04x-%04x.hcd",
-                udev->product ? udev->product : "BCM",
-                le16_to_cpu(udev->descriptor.idVendor),
-                le16_to_cpu(udev->descriptor.idProduct));
-
-       ret = request_firmware(&fw, fw_name, &hdev->dev);
-       if (ret < 0) {
-               BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name);
-               return 0;
-       }
+       int i;
 
        /* Reset */
        skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                ret = PTR_ERR(skb);
                BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret);
-               goto done;
+               return ret;
        }
        kfree_skb(skb);
 
        /* Read Local Version Info */
-       skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
-                            HCI_INIT_TIMEOUT);
+       skb = btusb_read_local_version(hdev);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       ver = (struct hci_rp_read_local_version *)skb->data;
+       rev = le16_to_cpu(ver->hci_rev);
+       subver = le16_to_cpu(ver->lmp_subver);
+       kfree_skb(skb);
+
+       /* Read Verbose Config Version Info */
+       skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                ret = PTR_ERR(skb);
-               BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
+               BT_ERR("%s: BCM: Read Verbose Version failed (%ld)",
                       hdev->name, ret);
-               goto done;
+               return ret;
        }
 
-       if (skb->len != sizeof(*ver)) {
-               BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
+       if (skb->len != 7) {
+               BT_ERR("%s: BCM: Read Verbose Version event length mismatch",
                       hdev->name);
                kfree_skb(skb);
-               ret = -EIO;
-               goto done;
+               return -EIO;
        }
 
-       ver = (struct hci_rp_read_local_version *)skb->data;
-       BT_INFO("%s: BCM: patching hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
-               "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev,
-               ver->lmp_ver, ver->lmp_subver);
+       BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
        kfree_skb(skb);
 
+       for (i = 0; bcm_subver_table[i].name; i++) {
+               if (subver == bcm_subver_table[i].subver) {
+                       hw_name = bcm_subver_table[i].name;
+                       break;
+               }
+       }
+
+       BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
+               hw_name ? : "BCM", (subver & 0x7000) >> 13,
+               (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+
+       snprintf(fw_name, sizeof(fw_name), "brcm/%s-%4.4x-%4.4x.hcd",
+                hw_name ? : "BCM",
+                le16_to_cpu(udev->descriptor.idVendor),
+                le16_to_cpu(udev->descriptor.idProduct));
+
+       ret = request_firmware(&fw, fw_name, &hdev->dev);
+       if (ret < 0) {
+               BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name);
+               return 0;
+       }
+
        /* Start Download */
        skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
@@ -2476,29 +2557,21 @@ reset_fw:
        kfree_skb(skb);
 
        /* Read Local Version Info */
-       skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
-                            HCI_INIT_TIMEOUT);
+       skb = btusb_read_local_version(hdev);
        if (IS_ERR(skb)) {
                ret = PTR_ERR(skb);
-               BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
-                      hdev->name, ret);
-               goto done;
-       }
-
-       if (skb->len != sizeof(*ver)) {
-               BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
-                      hdev->name);
-               kfree_skb(skb);
-               ret = -EIO;
                goto done;
        }
 
        ver = (struct hci_rp_read_local_version *)skb->data;
-       BT_INFO("%s: BCM: firmware hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
-               "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev,
-               ver->lmp_ver, ver->lmp_subver);
+       rev = le16_to_cpu(ver->hci_rev);
+       subver = le16_to_cpu(ver->lmp_subver);
        kfree_skb(skb);
 
+       BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
+               hw_name ? : "BCM", (subver & 0x7000) >> 13,
+               (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+
        /* Read BD Address */
        skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
                             HCI_INIT_TIMEOUT);
@@ -2560,6 +2633,34 @@ static int btusb_set_bdaddr_bcm(struct hci_dev *hdev, const bdaddr_t *bdaddr)
        return 0;
 }
 
+static int btusb_setup_bcm_apple(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+       int err;
+
+       /* Read Verbose Config Version Info */
+       skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: BCM: Read Verbose Version failed (%d)",
+                      hdev->name, err);
+               return err;
+       }
+
+       if (skb->len != 7) {
+               BT_ERR("%s: BCM: Read Verbose Version event length mismatch",
+                      hdev->name);
+               kfree_skb(skb);
+               return -EIO;
+       }
+
+       BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+               get_unaligned_le16(skb->data + 5));
+       kfree_skb(skb);
+
+       return 0;
+}
+
 static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
                                    const bdaddr_t *bdaddr)
 {
@@ -2585,6 +2686,258 @@ static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
        return 0;
 }
 
+#define QCA_DFU_PACKET_LEN     4096
+
+#define QCA_GET_TARGET_VERSION 0x09
+#define QCA_CHECK_STATUS       0x05
+#define QCA_DFU_DOWNLOAD       0x01
+
+#define QCA_SYSCFG_UPDATED     0x40
+#define QCA_PATCH_UPDATED      0x80
+#define QCA_DFU_TIMEOUT                3000
+
+struct qca_version {
+       __le32  rom_version;
+       __le32  patch_version;
+       __le32  ram_version;
+       __le32  ref_clock;
+       __u8    reserved[4];
+} __packed;
+
+struct qca_rampatch_version {
+       __le16  rom_version;
+       __le16  patch_version;
+} __packed;
+
+struct qca_device_info {
+       u32     rom_version;
+       u8      rampatch_hdr;   /* length of header in rampatch */
+       u8      nvm_hdr;        /* length of header in NVM */
+       u8      ver_offset;     /* offset of version structure in rampatch */
+};
+
+static const struct qca_device_info qca_devices_table[] = {
+       { 0x00000100, 20, 4, 10 }, /* Rome 1.0 */
+       { 0x00000101, 20, 4, 10 }, /* Rome 1.1 */
+       { 0x00000201, 28, 4, 18 }, /* Rome 2.1 */
+       { 0x00000300, 28, 4, 18 }, /* Rome 3.0 */
+       { 0x00000302, 28, 4, 18 }, /* Rome 3.2 */
+};
+
+static int btusb_qca_send_vendor_req(struct hci_dev *hdev, u8 request,
+                                    void *data, u16 size)
+{
+       struct btusb_data *btdata = hci_get_drvdata(hdev);
+       struct usb_device *udev = btdata->udev;
+       int pipe, err;
+       u8 *buf;
+
+       buf = kmalloc(size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       /* Found some of USB hosts have IOT issues with ours so that we should
+        * not wait until HCI layer is ready.
+        */
+       pipe = usb_rcvctrlpipe(udev, 0);
+       err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN,
+                             0, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+       if (err < 0) {
+               BT_ERR("%s: Failed to access otp area (%d)", hdev->name, err);
+               goto done;
+       }
+
+       memcpy(data, buf, size);
+
+done:
+       kfree(buf);
+
+       return err;
+}
+
+static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
+                                      const struct firmware *firmware,
+                                      size_t hdr_size)
+{
+       struct btusb_data *btdata = hci_get_drvdata(hdev);
+       struct usb_device *udev = btdata->udev;
+       size_t count, size, sent = 0;
+       int pipe, len, err;
+       u8 *buf;
+
+       buf = kmalloc(QCA_DFU_PACKET_LEN, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       count = firmware->size;
+
+       size = min_t(size_t, count, hdr_size);
+       memcpy(buf, firmware->data, size);
+
+       /* USB patches should go down to controller through USB path
+        * because binary format fits to go down through USB channel.
+        * USB control path is for patching headers and USB bulk is for
+        * patch body.
+        */
+       pipe = usb_sndctrlpipe(udev, 0);
+       err = usb_control_msg(udev, pipe, QCA_DFU_DOWNLOAD, USB_TYPE_VENDOR,
+                             0, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send headers (%d)", hdev->name, err);
+               goto done;
+       }
+
+       sent += size;
+       count -= size;
+
+       while (count) {
+               size = min_t(size_t, count, QCA_DFU_PACKET_LEN);
+
+               memcpy(buf, firmware->data + sent, size);
+
+               pipe = usb_sndbulkpipe(udev, 0x02);
+               err = usb_bulk_msg(udev, pipe, buf, size, &len,
+                                  QCA_DFU_TIMEOUT);
+               if (err < 0) {
+                       BT_ERR("%s: Failed to send body at %zd of %zd (%d)",
+                              hdev->name, sent, firmware->size, err);
+                       break;
+               }
+
+               if (size != len) {
+                       BT_ERR("%s: Failed to get bulk buffer", hdev->name);
+                       err = -EILSEQ;
+                       break;
+               }
+
+               sent  += size;
+               count -= size;
+       }
+
+done:
+       kfree(buf);
+       return err;
+}
+
+static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
+                                        struct qca_version *ver,
+                                        const struct qca_device_info *info)
+{
+       struct qca_rampatch_version *rver;
+       const struct firmware *fw;
+       u32 ver_rom, ver_patch;
+       u16 rver_rom, rver_patch;
+       char fwname[64];
+       int err;
+
+       ver_rom = le32_to_cpu(ver->rom_version);
+       ver_patch = le32_to_cpu(ver->patch_version);
+
+       snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin", ver_rom);
+
+       err = request_firmware(&fw, fwname, &hdev->dev);
+       if (err) {
+               BT_ERR("%s: failed to request rampatch file: %s (%d)",
+                      hdev->name, fwname, err);
+               return err;
+       }
+
+       BT_INFO("%s: using rampatch file: %s", hdev->name, fwname);
+
+       rver = (struct qca_rampatch_version *)(fw->data + info->ver_offset);
+       rver_rom = le16_to_cpu(rver->rom_version);
+       rver_patch = le16_to_cpu(rver->patch_version);
+
+       BT_INFO("%s: QCA: patch rome 0x%x build 0x%x, firmware rome 0x%x "
+               "build 0x%x", hdev->name, rver_rom, rver_patch, ver_rom,
+               ver_patch);
+
+       if (rver_rom != ver_rom || rver_patch <= ver_patch) {
+               BT_ERR("%s: rampatch file version did not match with firmware",
+                      hdev->name);
+               err = -EINVAL;
+               goto done;
+       }
+
+       err = btusb_setup_qca_download_fw(hdev, fw, info->rampatch_hdr);
+
+done:
+       release_firmware(fw);
+
+       return err;
+}
+
+static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
+                                   struct qca_version *ver,
+                                   const struct qca_device_info *info)
+{
+       const struct firmware *fw;
+       char fwname[64];
+       int err;
+
+       snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
+                le32_to_cpu(ver->rom_version));
+
+       err = request_firmware(&fw, fwname, &hdev->dev);
+       if (err) {
+               BT_ERR("%s: failed to request NVM file: %s (%d)",
+                      hdev->name, fwname, err);
+               return err;
+       }
+
+       BT_INFO("%s: using NVM file: %s", hdev->name, fwname);
+
+       err = btusb_setup_qca_download_fw(hdev, fw, info->nvm_hdr);
+
+       release_firmware(fw);
+
+       return err;
+}
+
+static int btusb_setup_qca(struct hci_dev *hdev)
+{
+       const struct qca_device_info *info = NULL;
+       struct qca_version ver;
+       u32 ver_rom;
+       u8 status;
+       int i, err;
+
+       err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver,
+                                       sizeof(ver));
+       if (err < 0)
+               return err;
+
+       ver_rom = le32_to_cpu(ver.rom_version);
+       for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) {
+               if (ver_rom == qca_devices_table[i].rom_version)
+                       info = &qca_devices_table[i];
+       }
+       if (!info) {
+               BT_ERR("%s: don't support firmware rome 0x%x", hdev->name,
+                      ver_rom);
+               return -ENODEV;
+       }
+
+       err = btusb_qca_send_vendor_req(hdev, QCA_CHECK_STATUS, &status,
+                                       sizeof(status));
+       if (err < 0)
+               return err;
+
+       if (!(status & QCA_PATCH_UPDATED)) {
+               err = btusb_setup_qca_load_rampatch(hdev, &ver, info);
+               if (err < 0)
+                       return err;
+       }
+
+       if (!(status & QCA_SYSCFG_UPDATED)) {
+               err = btusb_setup_qca_load_nvm(hdev, &ver, info);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+
 static int btusb_probe(struct usb_interface *intf,
                       const struct usb_device_id *id)
 {
@@ -2707,10 +3060,17 @@ static int btusb_probe(struct usb_interface *intf,
                set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
        }
 
+       if (id->driver_info & BTUSB_BCM_APPLE) {
+               hdev->setup = btusb_setup_bcm_apple;
+               set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+       }
+
        if (id->driver_info & BTUSB_INTEL) {
                hdev->setup = btusb_setup_intel;
+               hdev->shutdown = btusb_shutdown_intel;
                hdev->set_bdaddr = btusb_set_bdaddr_intel;
                set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+               set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
        }
 
        if (id->driver_info & BTUSB_INTEL_NEW) {
@@ -2734,9 +3094,15 @@ static int btusb_probe(struct usb_interface *intf,
 
        if (id->driver_info & BTUSB_ATH3012) {
                hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+               set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
                set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
        }
 
+       if (id->driver_info & BTUSB_QCA_ROME) {
+               data->setup_on_usb = btusb_setup_qca;
+               hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+       }
+
        if (id->driver_info & BTUSB_AMP) {
                /* AMP controllers do not support SCO packets */
                data->isoc = NULL;
@@ -2772,6 +3138,8 @@ static int btusb_probe(struct usb_interface *intf,
                /* Fake CSR devices with broken commands */
                if (bcdDevice <= 0x100)
                        hdev->setup = btusb_setup_csr;
+
+               set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
        }
 
        if (id->driver_info & BTUSB_SNIFFER) {
index dc487b5d11568f9ebcfec033ee0bc47f6d34ece0..1363dc616ace2f751443d6f70adeea976f5a461d 100644 (file)
@@ -261,6 +261,16 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        return 0;
 }
 
+static int hci_uart_setup(struct hci_dev *hdev)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+
+       if (hu->proto->setup)
+               return hu->proto->setup(hu);
+
+       return 0;
+}
+
 /* ------ LDISC part ------ */
 /* hci_uart_tty_open
  *
@@ -426,6 +436,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
        hdev->close = hci_uart_close;
        hdev->flush = hci_uart_flush;
        hdev->send  = hci_uart_send_frame;
+       hdev->setup = hci_uart_setup;
        SET_HCIDEV_DEV(hdev, hu->tty->dev);
 
        if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
@@ -488,7 +499,7 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
                                    BIT(HCI_UART_INIT_PENDING) |
                                    BIT(HCI_UART_EXT_CONFIG);
 
-       if ((flags & ~valid_flags))
+       if (flags & ~valid_flags)
                return -EINVAL;
 
        hu->hdev_flags = flags;
index 247488edcbf93711168c9ee072a97edf923ecd9c..074ed29092b487b700838b34f1504e8379335dd9 100644 (file)
@@ -59,6 +59,7 @@ struct hci_uart_proto {
        int (*flush)(struct hci_uart *hu);
        int (*recv)(struct hci_uart *hu, void *data, int len);
        int (*enqueue)(struct hci_uart *hu, struct sk_buff *skb);
+       int (*setup)(struct hci_uart *hu);
        struct sk_buff *(*dequeue)(struct hci_uart *hu);
 };
 
index 297110c12635d3b8c36b53a7f089aa2666024eca..6b1721f978c2945f4af716636c8373fe8807dca0 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/pfn.h>
 #include <linux/export.h>
 #include <linux/io.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 
 #include <linux/uaccess.h>
 
@@ -607,16 +607,16 @@ static ssize_t write_null(struct file *file, const char __user *buf,
        return count;
 }
 
-static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
-                            unsigned long nr_segs, loff_t pos)
+static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
 {
        return 0;
 }
 
-static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t pos)
+static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
 {
-       return iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(from);
+       iov_iter_advance(from, count);
+       return count;
 }
 
 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
@@ -718,7 +718,7 @@ static int open_port(struct inode *inode, struct file *filp)
 #define zero_lseek     null_lseek
 #define full_lseek      null_lseek
 #define write_zero     write_null
-#define aio_write_zero aio_write_null
+#define write_iter_zero        write_iter_null
 #define open_mem       open_port
 #define open_kmem      open_mem
 
@@ -750,8 +750,8 @@ static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
-       .aio_read       = aio_read_null,
-       .aio_write      = aio_write_null,
+       .read_iter      = read_iter_null,
+       .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
 };
 
@@ -764,10 +764,9 @@ static const struct file_operations __maybe_unused port_fops = {
 
 static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
-       .read           = new_sync_read,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
-       .aio_write      = aio_write_zero,
+       .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
 #ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
@@ -776,7 +775,6 @@ static const struct file_operations zero_fops = {
 
 static const struct file_operations full_fops = {
        .llseek         = full_lseek,
-       .read           = new_sync_read,
        .read_iter      = read_iter_zero,
        .write          = write_full,
 };
index 6e29bf2db5367732eaf68f05d1c75dd22adf7966..5fc291c6157e3121f57d0a30e8d52a5021e3253c 100644 (file)
@@ -282,9 +282,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
 #endif
 
 static const struct file_operations raw_fops = {
-       .read           = new_sync_read,
        .read_iter      = blkdev_read_iter,
-       .write          = new_sync_write,
        .write_iter     = blkdev_write_iter,
        .fsync          = blkdev_fsync,
        .open           = raw_open,
index 02e76ac6d282d5a26a31598b05b4d89a6fefd671..69f6b4acc377143d87a54d5e46dd9710f4d53f73 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/types.h>       /* size_t */
 #include <linux/proc_fs.h>
 #include <linux/fcntl.h>       /* O_ACCMODE */
-#include <linux/aio.h>
 #include <linux/pagemap.h>
 #include <linux/hugetlb.h>
 #include <linux/uaccess.h>
index 2c68da1ceeeef9ac39c272e0309dbb7ed9c85514..f4ea80d602f73bd6bc5adac2a32bbbe2b58de716 100644 (file)
@@ -237,18 +237,6 @@ static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
        return -net->hard_header_len;
 }
 
-static int fwnet_header_rebuild(struct sk_buff *skb)
-{
-       struct fwnet_header *h = (struct fwnet_header *)skb->data;
-
-       if (get_unaligned_be16(&h->h_proto) == ETH_P_IP)
-               return arp_find((unsigned char *)&h->h_dest, skb);
-
-       dev_notice(&skb->dev->dev, "unable to resolve type %04x addresses\n",
-                  be16_to_cpu(h->h_proto));
-       return 0;
-}
-
 static int fwnet_header_cache(const struct neighbour *neigh,
                              struct hh_cache *hh, __be16 type)
 {
@@ -282,7 +270,6 @@ static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 
 static const struct header_ops fwnet_header_ops = {
        .create         = fwnet_header_create,
-       .rebuild        = fwnet_header_rebuild,
        .cache          = fwnet_header_cache,
        .cache_update   = fwnet_header_cache_update,
        .parse          = fwnet_header_parse,
index 69fac068669fde566f41013cefbdf48db023466c..2eebd28b4c40af2789c32e0008f2b60006fc03ac 100644 (file)
@@ -86,10 +86,13 @@ static void dmi_table(u8 *buf, u32 len, int num,
        int i = 0;
 
        /*
-        *      Stop when we see all the items the table claimed to have
-        *      OR we run off the end of the table (also happens)
+        * Stop when we have seen all the items the table claimed to have
+        * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
+        * off the end of the table (should never happen but sometimes does
+        * on bogus implementations.)
         */
-       while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
+       while ((!num || i < num) &&
+              (data - buf + sizeof(struct dmi_header)) <= len) {
                const struct dmi_header *dm = (const struct dmi_header *)data;
 
                /*
@@ -529,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf)
        if (memcmp(buf, "_SM3_", 5) == 0 &&
            buf[6] < 32 && dmi_checksum(buf, buf[6])) {
                dmi_ver = get_unaligned_be16(buf + 7);
+               dmi_num = 0;                    /* No longer specified */
                dmi_len = get_unaligned_le32(buf + 12);
                dmi_base = get_unaligned_le64(buf + 16);
 
-               /*
-                * The 64-bit SMBIOS 3.0 entry point no longer has a field
-                * containing the number of structures present in the table.
-                * Instead, it defines the table size as a maximum size, and
-                * relies on the end-of-table structure type (#127) to be used
-                * to signal the end of the table.
-                * So let's define dmi_num as an upper bound as well: each
-                * structure has a 4 byte header, so dmi_len / 4 is an upper
-                * bound for the number of structures in the table.
-                */
-               dmi_num = dmi_len / 4;
-
                if (dmi_walk_early(dmi_decode) == 0) {
                        pr_info("SMBIOS %d.%d present.\n",
                                dmi_ver >> 8, dmi_ver & 0xFF);
index 1096da327130526080139e628fdd0a0130d32861..75c6d2103e07adad3b11919687e81f8dd7a8fca3 100644 (file)
@@ -659,7 +659,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
 
        mutex_lock(&data->mutex);
 
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = bma180_get_data_reg(data, bit);
                if (ret < 0) {
index 066d0c04072c69943fa21313fb47f06c72fedcea..75567fd457dcc4b9bd7c147fdc4cb229cdbaf0c4 100644 (file)
@@ -168,14 +168,14 @@ static const struct {
        int val;
        int val2;
        u8 bw_bits;
-} bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08},
-                                    {15, 630000, 0x09},
-                                    {31, 250000, 0x0A},
-                                    {62, 500000, 0x0B},
-                                    {125, 0, 0x0C},
-                                    {250, 0, 0x0D},
-                                    {500, 0, 0x0E},
-                                    {1000, 0, 0x0F} };
+} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
+                                    {31, 260000, 0x09},
+                                    {62, 500000, 0x0A},
+                                    {125, 0, 0x0B},
+                                    {250, 0, 0x0C},
+                                    {500, 0, 0x0D},
+                                    {1000, 0, 0x0E},
+                                    {2000, 0, 0x0F} };
 
 static const struct {
        int bw_bits;
@@ -840,7 +840,7 @@ static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
 }
 
 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
-               "7.810000 15.630000 31.250000 62.500000 125 250 500 1000");
+               "15.620000 31.260000 62.50000 125 250 500 1000 2000");
 
 static struct attribute *bmc150_accel_attributes[] = {
        &iio_const_attr_sampling_frequency_available.dev_attr.attr,
@@ -986,7 +986,7 @@ static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
        int bit, ret, i = 0;
 
        mutex_lock(&data->mutex);
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = i2c_smbus_read_word_data(data->client,
                                               BMC150_ACCEL_AXIS_TO_REG(bit));
index 567de269cc00650191541a98ac8d5818d0ea8661..1a6379525fa47e73497b17866be4276fc88c8065 100644 (file)
@@ -956,7 +956,7 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
 
        mutex_lock(&data->mutex);
 
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = kxcjk1013_get_acc_reg(data, bit);
                if (ret < 0) {
index 202daf889be276315d24575b1301349f295fe4c8..46379b1fb25b59b10018a121b2cc8dc78082d4a6 100644 (file)
@@ -137,7 +137,8 @@ config AXP288_ADC
 
 config CC10001_ADC
        tristate "Cosmic Circuits 10001 ADC driver"
-       depends on HAS_IOMEM || HAVE_CLK || REGULATOR
+       depends on HAVE_CLK || REGULATOR
+       depends on HAS_IOMEM
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
index ff61ae55dd3ff8ac73c0925c0b88fa6a75a1083d..8a0eb4a04fb55b9cb2436db5b16f678f654b8a26 100644 (file)
@@ -544,7 +544,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 {
        struct iio_dev *idev = iio_trigger_get_drvdata(trig);
        struct at91_adc_state *st = iio_priv(idev);
-       struct iio_buffer *buffer = idev->buffer;
        struct at91_adc_reg_desc *reg = st->registers;
        u32 status = at91_adc_readl(st, reg->trigger_register);
        int value;
@@ -564,7 +563,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
                at91_adc_writel(st, reg->trigger_register,
                                status | value);
 
-               for_each_set_bit(bit, buffer->scan_mask,
+               for_each_set_bit(bit, idev->active_scan_mask,
                                 st->num_channels) {
                        struct iio_chan_spec const *chan = idev->channels + bit;
                        at91_adc_writel(st, AT91_ADC_CHER,
@@ -579,7 +578,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
                at91_adc_writel(st, reg->trigger_register,
                                status & ~value);
 
-               for_each_set_bit(bit, buffer->scan_mask,
+               for_each_set_bit(bit, idev->active_scan_mask,
                                 st->num_channels) {
                        struct iio_chan_spec const *chan = idev->channels + bit;
                        at91_adc_writel(st, AT91_ADC_CHDR,
index 2e5cc4409f78884e82f309c729febabe3ff7f982..a0e7161f040c91daca74a469d27ff641be9ea915 100644 (file)
@@ -188,12 +188,11 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
 static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
 {
        struct tiadc_device *adc_dev = iio_priv(indio_dev);
-       struct iio_buffer *buffer = indio_dev->buffer;
        unsigned int enb = 0;
        u8 bit;
 
        tiadc_step_config(indio_dev);
-       for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels)
+       for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels)
                enb |= (get_adc_step_bit(adc_dev, bit) << 1);
        adc_dev->buffer_en_ch_steps = enb;
 
index 8ec353c01d98e02e7074df9d4d295ec19f772f09..e63b8e76d4c3d54edc25d23561f28a11afeb05e7 100644 (file)
@@ -141,9 +141,13 @@ struct vf610_adc {
        struct regulator *vref;
        struct vf610_adc_feature adc_feature;
 
+       u32 sample_freq_avail[5];
+
        struct completion completion;
 };
 
+static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
+
 #define VF610_ADC_CHAN(_idx, _chan_type) {                     \
        .type = (_chan_type),                                   \
        .indexed = 1,                                           \
@@ -180,35 +184,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = {
        /* sentinel */
 };
 
-/*
- * ADC sample frequency, unit is ADCK cycles.
- * ADC clk source is ipg clock, which is the same as bus clock.
- *
- * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
- * SFCAdder: fixed to 6 ADCK cycles
- * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
- * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
- * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
- *
- * By default, enable 12 bit resolution mode, clock source
- * set to ipg clock, So get below frequency group:
- */
-static const u32 vf610_sample_freq_avail[5] =
-{1941176, 559332, 286957, 145374, 73171};
+static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
+{
+       unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk);
+       int i;
+
+       /*
+        * Calculate ADC sample frequencies
+        * Sample time unit is ADCK cycles. ADCK clk source is ipg clock,
+        * which is the same as bus clock.
+        *
+        * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
+        * SFCAdder: fixed to 6 ADCK cycles
+        * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
+        * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
+        * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
+        */
+       adck_rate = ipg_rate / info->adc_feature.clk_div;
+       for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
+               info->sample_freq_avail[i] =
+                       adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3));
+}
 
 static inline void vf610_adc_cfg_init(struct vf610_adc *info)
 {
+       struct vf610_adc_feature *adc_feature = &info->adc_feature;
+
        /* set default Configuration for ADC controller */
-       info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET;
-       info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET;
+       adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET;
+       adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET;
+
+       adc_feature->calibration = true;
+       adc_feature->ovwren = true;
+
+       adc_feature->res_mode = 12;
+       adc_feature->sample_rate = 1;
+       adc_feature->lpm = true;
 
-       info->adc_feature.calibration = true;
-       info->adc_feature.ovwren = true;
+       /* Use a save ADCK which is below 20MHz on all devices */
+       adc_feature->clk_div = 8;
 
-       info->adc_feature.clk_div = 1;
-       info->adc_feature.res_mode = 12;
-       info->adc_feature.sample_rate = 1;
-       info->adc_feature.lpm = true;
+       vf610_adc_calculate_rates(info);
 }
 
 static void vf610_adc_cfg_post_set(struct vf610_adc *info)
@@ -290,12 +306,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info)
 
        cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
 
-       /* low power configuration */
        cfg_data &= ~VF610_ADC_ADLPC_EN;
        if (adc_feature->lpm)
                cfg_data |= VF610_ADC_ADLPC_EN;
 
-       /* disable high speed */
        cfg_data &= ~VF610_ADC_ADHSC_EN;
 
        writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
@@ -435,10 +449,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171");
+static ssize_t vf610_show_samp_freq_avail(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev));
+       size_t len = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++)
+               len += scnprintf(buf + len, PAGE_SIZE - len,
+                       "%u ", info->sample_freq_avail[i]);
+
+       /* replace trailing space by newline */
+       buf[len - 1] = '\n';
+
+       return len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail);
 
 static struct attribute *vf610_attributes[] = {
-       &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+       &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
        NULL
 };
 
@@ -502,7 +533,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
                return IIO_VAL_FRACTIONAL_LOG2;
 
        case IIO_CHAN_INFO_SAMP_FREQ:
-               *val = vf610_sample_freq_avail[info->adc_feature.sample_rate];
+               *val = info->sample_freq_avail[info->adc_feature.sample_rate];
                *val2 = 0;
                return IIO_VAL_INT;
 
@@ -525,9 +556,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
        switch (mask) {
                case IIO_CHAN_INFO_SAMP_FREQ:
                        for (i = 0;
-                               i < ARRAY_SIZE(vf610_sample_freq_avail);
+                               i < ARRAY_SIZE(info->sample_freq_avail);
                                i++)
-                               if (val == vf610_sample_freq_avail[i]) {
+                               if (val == info->sample_freq_avail[i]) {
                                        info->adc_feature.sample_rate = i;
                                        vf610_adc_sample_set(info);
                                        return 0;
index 60451b32824212a039b8ac9358751faa1af12819..ccf3ea7e1afa8ca1848937b9a8b92e1f2cefc8be 100644 (file)
@@ -822,7 +822,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
        int bit, ret, i = 0;
 
        mutex_lock(&data->mutex);
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = i2c_smbus_read_word_data(data->client,
                                               BMG160_AXIS_TO_REG(bit));
index e0017c22bb9c6ce3b4f4f9c753a8ce1364621b19..f53e9a803a0e1ec1589a5b0ad25d7aa1f8d54e70 100644 (file)
@@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
        iio_trigger_set_drvdata(adis->trig, adis);
        ret = iio_trigger_register(adis->trig);
 
-       indio_dev->trig = adis->trig;
+       indio_dev->trig = iio_trigger_get(adis->trig);
        if (ret)
                goto error_free_irq;
 
index d8d5bed65e072cae577968edb78e2e592c2a5bfa..ef76afe2643cb0bebe512124ca8c9326e09229c2 100644 (file)
@@ -410,42 +410,46 @@ error_read_raw:
        }
 }
 
-static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr)
+static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
 {
-       int result;
+       int result, i;
        u8 d;
 
-       if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM)
-               return -EINVAL;
-       if (fsr == st->chip_config.fsr)
-               return 0;
+       for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
+               if (gyro_scale_6050[i] == val) {
+                       d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
+                       result = inv_mpu6050_write_reg(st,
+                                       st->reg->gyro_config, d);
+                       if (result)
+                               return result;
 
-       d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
-       result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
-       if (result)
-               return result;
-       st->chip_config.fsr = fsr;
+                       st->chip_config.fsr = i;
+                       return 0;
+               }
+       }
 
-       return 0;
+       return -EINVAL;
 }
 
-static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs)
+static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
 {
-       int result;
+       int result, i;
        u8 d;
 
-       if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM)
-               return -EINVAL;
-       if (fs == st->chip_config.accl_fs)
-               return 0;
+       for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
+               if (accel_scale[i] == val) {
+                       d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+                       result = inv_mpu6050_write_reg(st,
+                                       st->reg->accl_config, d);
+                       if (result)
+                               return result;
 
-       d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
-       result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
-       if (result)
-               return result;
-       st->chip_config.accl_fs = fs;
+                       st->chip_config.accl_fs = i;
+                       return 0;
+               }
+       }
 
-       return 0;
+       return -EINVAL;
 }
 
 static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
@@ -471,10 +475,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_SCALE:
                switch (chan->type) {
                case IIO_ANGL_VEL:
-                       result = inv_mpu6050_write_fsr(st, val);
+                       result = inv_mpu6050_write_gyro_scale(st, val2);
                        break;
                case IIO_ACCEL:
-                       result = inv_mpu6050_write_accel_fs(st, val);
+                       result = inv_mpu6050_write_accel_scale(st, val2);
                        break;
                default:
                        result = -EINVAL;
index 0cd306a72a6e347391ea97d7a02d05e54b6c64ab..ba27e277511fc52585f8fa25fd20d49bca61be96 100644 (file)
 #include <linux/poll.h>
 #include "inv_mpu_iio.h"
 
+static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+{
+       unsigned long flags;
+
+       /* take the spin lock sem to avoid interrupt kick in */
+       spin_lock_irqsave(&st->time_stamp_lock, flags);
+       kfifo_reset(&st->timestamps);
+       spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+}
+
 int inv_reset_fifo(struct iio_dev *indio_dev)
 {
        int result;
@@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
                                        INV_MPU6050_BIT_FIFO_RST);
        if (result)
                goto reset_fifo_fail;
+
+       /* clear timestamps fifo */
+       inv_clear_kfifo(st);
+
        /* enable interrupt */
        if (st->chip_config.accl_fifo_enable ||
            st->chip_config.gyro_fifo_enable) {
@@ -83,16 +97,6 @@ reset_fifo_fail:
        return result;
 }
 
-static void inv_clear_kfifo(struct inv_mpu6050_state *st)
-{
-       unsigned long flags;
-
-       /* take the spin lock sem to avoid interrupt kick in */
-       spin_lock_irqsave(&st->time_stamp_lock, flags);
-       kfifo_reset(&st->timestamps);
-       spin_unlock_irqrestore(&st->time_stamp_lock, flags);
-}
-
 /**
  * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
  */
@@ -184,7 +188,6 @@ end_session:
 flush_fifo:
        /* Flush HW and SW FIFOs. */
        inv_reset_fifo(indio_dev);
-       inv_clear_kfifo(st);
        mutex_unlock(&indio_dev->mlock);
        iio_trigger_notify_done(indio_dev->trig);
 
index 5cc3692acf377664dbf255a698bb5e64606fe864..b3a36376c719317006cf8b9d30e1369f8b6af8fb 100644 (file)
@@ -1227,7 +1227,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
                base = KMX61_MAG_XOUT_L;
 
        mutex_lock(&data->lock);
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = kmx61_read_measurement(data, base, bit);
                if (ret < 0) {
index aaba9d3d980ee623ad6b78111a9c9708198b23e0..4df97f650e448e80053e037fed6d5e208493c687 100644 (file)
@@ -847,8 +847,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
  * @attr_list: List of IIO device attributes
  *
  * This function frees the memory allocated for each of the IIO device
- * attributes in the list. Note: if you want to reuse the list after calling
- * this function you have to reinitialize it using INIT_LIST_HEAD().
+ * attributes in the list.
  */
 void iio_free_chan_devattr_list(struct list_head *attr_list)
 {
@@ -856,6 +855,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list)
 
        list_for_each_entry_safe(p, n, attr_list, l) {
                kfree(p->dev_attr.attr.name);
+               list_del(&p->l);
                kfree(p);
        }
 }
@@ -936,6 +936,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
 
        iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
        kfree(indio_dev->chan_attr_group.attrs);
+       indio_dev->chan_attr_group.attrs = NULL;
 }
 
 static void iio_dev_release(struct device *device)
index a4b397048f71f9fe2e22be46f45fca0fb06cb8ff..a99692ba91bc75fb3186f69f2d55bfc9efd6652c 100644 (file)
@@ -500,6 +500,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
 error_free_setup_event_lines:
        iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
        kfree(indio_dev->event_interface);
+       indio_dev->event_interface = NULL;
        return ret;
 }
 
index 74dff4e4a11acdda1ec44ec6eaec75ef4d0543e4..89fca3a7075039b9b9a0de5514c1b96758d4593b 100644 (file)
@@ -494,7 +494,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
 
        mutex_lock(&data->mutex);
 
-       for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+       for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = sx9500_read_proximity(data, &indio_dev->channels[bit],
                                            &val);
index 6d7f453b4d05ef7da7f74aeafe22608b85dc00fc..450d1596500512c7f41174dec8d078bb15edb79a 100644 (file)
@@ -40,9 +40,9 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
-#include <linux/aio.h>
 #include <linux/jiffies.h>
 #include <linux/cpu.h>
+#include <linux/uio.h>
 #include <asm/pgtable.h>
 
 #include "ipath_kernel.h"
@@ -53,15 +53,19 @@ static int ipath_open(struct inode *, struct file *);
 static int ipath_close(struct inode *, struct file *);
 static ssize_t ipath_write(struct file *, const char __user *, size_t,
                           loff_t *);
-static ssize_t ipath_writev(struct kiocb *, const struct iovec *,
-                           unsigned long , loff_t);
+static ssize_t ipath_write_iter(struct kiocb *, struct iov_iter *from);
 static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
 static int ipath_mmap(struct file *, struct vm_area_struct *);
 
+/*
+ * This is really, really weird shit - write() and writev() here
+ * have completely unrelated semantics.  Sucky userland ABI,
+ * film at 11.
+ */
 static const struct file_operations ipath_file_ops = {
        .owner = THIS_MODULE,
        .write = ipath_write,
-       .aio_write = ipath_writev,
+       .write_iter = ipath_write_iter,
        .open = ipath_open,
        .release = ipath_close,
        .poll = ipath_poll,
@@ -2414,18 +2418,17 @@ bail:
        return ret;
 }
 
-static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov,
-                           unsigned long dim, loff_t off)
+static ssize_t ipath_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *filp = iocb->ki_filp;
        struct ipath_filedata *fp = filp->private_data;
        struct ipath_portdata *pd = port_fp(filp);
        struct ipath_user_sdma_queue *pq = fp->pq;
 
-       if (!dim)
+       if (!iter_is_iovec(from) || !from->nr_segs)
                return -EINVAL;
 
-       return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim);
+       return ipath_user_sdma_writev(pd->port_dd, pq, from->iov, from->nr_segs);
 }
 
 static struct class *ipath_class;
index b972c0b41799b51e2f554c1abc703d78e0c4636e..976bea794b5f7726cd642cb08de4073abd5dcb58 100644 (file)
@@ -587,8 +587,9 @@ static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_vio
                ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
        }
 
-       err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+       err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev->dev, mailbox);
        return err;
@@ -1525,8 +1526,8 @@ static void update_gids_task(struct work_struct *work)
        memcpy(gids, gw->gids, sizeof gw->gids);
 
        err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
-                      1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
-                      MLX4_CMD_WRAPPED);
+                      MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
        if (err)
                pr_warn("set port command failed\n");
        else
@@ -1564,7 +1565,7 @@ static void reset_gids_task(struct work_struct *work)
                                    IB_LINK_LAYER_ETHERNET) {
                err = mlx4_cmd(dev, mailbox->dma,
                               MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
-                              1, MLX4_CMD_SET_PORT,
+                              MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
                               MLX4_CMD_TIME_CLASS_B,
                               MLX4_CMD_WRAPPED);
                if (err)
index 39ab0caefdf9759a230b3c0c43377394fb7036c7..66080580e24db3e90c887bcea7132e6687bc3d33 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index c463e7bba5f453f0303a74b88ec96275c796ee0f..2ee6b105197544abb2799e552b129d37eff53906 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -572,11 +572,15 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
+       struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
+       void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+
        mlx5_cq_arm(&to_mcq(ibcq)->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-                   to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
-                   MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));
+                   uar_page,
+                   MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
+                   to_mcq(ibcq)->mcq.cons_index);
 
        return 0;
 }
@@ -697,8 +701,6 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 
        cq->mcq.set_ci_db  = cq->db.db;
        cq->mcq.arm_db     = cq->db.db + 1;
-       *cq->mcq.set_ci_db = 0;
-       *cq->mcq.arm_db    = 0;
        cq->mcq.cqe_sz = cqe_size;
 
        err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
@@ -782,7 +784,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
        cq->cqe_size = cqe_size;
        cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
        cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
-       err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
+       err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
        if (err)
                goto err_cqb;
 
index ece028fc47d681bf7b77b1242bb46cdb1467d527..a0e4e6ddb71ac55fe79222bf181d1fe229ebd6d9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 657af9a1167cc9ee8c38889ba8cef071c8892c5b..9cf9a37bb5ff9360303a0ea9197869b5fcfbaefc 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index cc4ac1e583b29725af01e03e40bebaee758c6e01..57c9809e8b8774e8aac47806134216ef97c46883 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -62,95 +62,6 @@ static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
 
-int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
-{
-       struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
-       struct mlx5_eq *eq, *n;
-       int err = -ENOENT;
-
-       spin_lock(&table->lock);
-       list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
-               if (eq->index == vector) {
-                       *eqn = eq->eqn;
-                       *irqn = eq->irqn;
-                       err = 0;
-                       break;
-               }
-       }
-       spin_unlock(&table->lock);
-
-       return err;
-}
-
-static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
-{
-       struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
-       char name[MLX5_MAX_EQ_NAME];
-       struct mlx5_eq *eq, *n;
-       int ncomp_vec;
-       int nent;
-       int err;
-       int i;
-
-       INIT_LIST_HEAD(&dev->eqs_list);
-       ncomp_vec = table->num_comp_vectors;
-       nent = MLX5_COMP_EQ_SIZE;
-       for (i = 0; i < ncomp_vec; i++) {
-               eq = kzalloc(sizeof(*eq), GFP_KERNEL);
-               if (!eq) {
-                       err = -ENOMEM;
-                       goto clean;
-               }
-
-               snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
-               err = mlx5_create_map_eq(dev->mdev, eq,
-                                        i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-                                        name, &dev->mdev->priv.uuari.uars[0]);
-               if (err) {
-                       kfree(eq);
-                       goto clean;
-               }
-               mlx5_ib_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
-               eq->index = i;
-               spin_lock(&table->lock);
-               list_add_tail(&eq->list, &dev->eqs_list);
-               spin_unlock(&table->lock);
-       }
-
-       dev->num_comp_vectors = ncomp_vec;
-       return 0;
-
-clean:
-       spin_lock(&table->lock);
-       list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
-               list_del(&eq->list);
-               spin_unlock(&table->lock);
-               if (mlx5_destroy_unmap_eq(dev->mdev, eq))
-                       mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
-               kfree(eq);
-               spin_lock(&table->lock);
-       }
-       spin_unlock(&table->lock);
-       return err;
-}
-
-static void free_comp_eqs(struct mlx5_ib_dev *dev)
-{
-       struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
-       struct mlx5_eq *eq, *n;
-
-       spin_lock(&table->lock);
-       list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
-               list_del(&eq->list);
-               spin_unlock(&table->lock);
-               if (mlx5_destroy_unmap_eq(dev->mdev, eq))
-                       mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
-               kfree(eq);
-               spin_lock(&table->lock);
-       }
-       spin_unlock(&table->lock);
-}
-
 static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
 {
@@ -1291,10 +1202,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
        get_ext_port_caps(dev);
 
-       err = alloc_comp_eqs(dev);
-       if (err)
-               goto err_dealloc;
-
        MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
 
        strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
@@ -1303,7 +1210,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->ib_dev.local_dma_lkey      = mdev->caps.gen.reserved_lkey;
        dev->num_ports          = mdev->caps.gen.num_ports;
        dev->ib_dev.phys_port_cnt     = dev->num_ports;
-       dev->ib_dev.num_comp_vectors    = dev->num_comp_vectors;
+       dev->ib_dev.num_comp_vectors    =
+               dev->mdev->priv.eq_table.num_comp_vectors;
        dev->ib_dev.dma_device  = &mdev->pdev->dev;
 
        dev->ib_dev.uverbs_abi_ver      = MLX5_IB_UVERBS_ABI_VERSION;
@@ -1390,13 +1298,13 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
        err = init_node_data(dev);
        if (err)
-               goto err_eqs;
+               goto err_dealloc;
 
        mutex_init(&dev->cap_mask_mutex);
 
        err = create_dev_resources(&dev->devr);
        if (err)
-               goto err_eqs;
+               goto err_dealloc;
 
        err = mlx5_ib_odp_init_one(dev);
        if (err)
@@ -1433,9 +1341,6 @@ err_odp:
 err_rsrc:
        destroy_dev_resources(&dev->devr);
 
-err_eqs:
-       free_comp_eqs(dev);
-
 err_dealloc:
        ib_dealloc_device((struct ib_device *)dev);
 
@@ -1450,7 +1355,6 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
        destroy_umrc_res(dev);
        mlx5_ib_odp_remove_one(dev);
        destroy_dev_resources(&dev->devr);
-       free_comp_eqs(dev);
        ib_dealloc_device(&dev->ib_dev);
 }
 
@@ -1458,6 +1362,7 @@ static struct mlx5_interface mlx5_ib_interface = {
        .add            = mlx5_ib_add,
        .remove         = mlx5_ib_remove,
        .event          = mlx5_ib_event,
+       .protocol       = MLX5_INTERFACE_PROTOCOL_IB,
 };
 
 static int __init mlx5_ib_init(void)
index 611a9fdf2f383cf0982415d4603906e208f24da8..40df2cca0609a6c3dfff09e738bec5450685f8e0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 83f22fe297c8ac522efa80001819990cdc31508b..dff1cfcdf476cfed06d8835cd5316d234df09e1e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -421,9 +421,7 @@ struct mlx5_ib_dev {
        struct ib_device                ib_dev;
        struct mlx5_core_dev            *mdev;
        MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
-       struct list_head                eqs_list;
        int                             num_ports;
-       int                             num_comp_vectors;
        /* serialize update of capability mask
         */
        struct mutex                    cap_mask_mutex;
@@ -594,7 +592,6 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
                                          struct ib_ucontext *context,
                                          struct ib_udata *udata);
 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
-int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn);
 int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
 int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
index cd9822eeacae3f1ab138731ea9c6a67963974bc2..71c5935838649e71a4a2f6b6cc16cb18f9a1cc16 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index a2c541c4809a583dc330db186593a03e01825f53..5099db08afd2c80c1b9049e3fc71fef4cfbb269b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index be0cd358b080977ed50fdc2dedcd2e0257a19a56..4d7024b899cb091a12aacfa9450af1e7750d4f45 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -796,9 +796,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
                goto err_free;
        }
 
-       qp->db.db[0] = 0;
-       qp->db.db[1] = 0;
-
        qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
        qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
        qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
@@ -1162,10 +1159,11 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return;
+
        if (qp->state != IB_QPS_RESET) {
                mlx5_ib_qp_disable_pagefaults(qp);
                if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
-                                       MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
+                                       MLX5_QP_STATE_RST, in, 0, &qp->mqp))
                        mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
                                     qp->mqp.qpn);
        }
index 41fec66217dd3b2b7f61fcff7068f01a06493933..02d77a29764d5e1ab925423b64bdb20157fbc781 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -165,8 +165,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
                return err;
        }
 
-       *srq->db.db = 0;
-
        if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
                mlx5_ib_dbg(dev, "buf alloc failed\n");
                err = -ENOMEM;
index d0ba264ac1ed259ab4b58473d3ba12c2d499f4b0..76fb7b927d373ef2710d412ec015309fb24fdd72 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 41937c6f888af13deadb6c7b25678cfc34596cf8..9ea6c440a00ca7012f6422f1ffd7f525b8d80548 100644 (file)
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
-#include <linux/aio.h>
 #include <linux/jiffies.h>
 #include <asm/pgtable.h>
 #include <linux/delay.h>
 #include <linux/export.h>
+#include <linux/uio.h>
 
 #include "qib.h"
 #include "qib_common.h"
 static int qib_open(struct inode *, struct file *);
 static int qib_close(struct inode *, struct file *);
 static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
-static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
-                            unsigned long, loff_t);
+static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
 static unsigned int qib_poll(struct file *, struct poll_table_struct *);
 static int qib_mmapf(struct file *, struct vm_area_struct *);
 
+/*
+ * This is really, really weird shit - write() and writev() here
+ * have completely unrelated semantics.  Sucky userland ABI,
+ * film at 11.
+ */
 static const struct file_operations qib_file_ops = {
        .owner = THIS_MODULE,
        .write = qib_write,
-       .aio_write = qib_aio_write,
+       .write_iter = qib_write_iter,
        .open = qib_open,
        .release = qib_close,
        .poll = qib_poll,
@@ -2249,17 +2253,16 @@ bail:
        return ret;
 }
 
-static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                            unsigned long dim, loff_t off)
+static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct qib_filedata *fp = iocb->ki_filp->private_data;
        struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
        struct qib_user_sdma_queue *pq = fp->pq;
 
-       if (!dim || !pq)
+       if (!iter_is_iovec(from) || !from->nr_segs || !pq)
                return -EINVAL;
-
-       return qib_user_sdma_writev(rcd, pq, iov, dim);
+
+       return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
 }
 
 static struct class *qib_class;
index 58b5aa3b6f2dded5d2e6d15aff080551aa9eddd9..657b89b1d291c90c76c57f8d4b8183b87c61aaad 100644 (file)
@@ -842,6 +842,13 @@ static void ipoib_set_mcast_list(struct net_device *dev)
        queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
+static int ipoib_get_iflink(const struct net_device *dev)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+       return priv->parent->ifindex;
+}
+
 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
 {
        /*
@@ -1341,6 +1348,7 @@ static const struct net_device_ops ipoib_netdev_ops = {
        .ndo_start_xmit          = ipoib_start_xmit,
        .ndo_tx_timeout          = ipoib_timeout,
        .ndo_set_rx_mode         = ipoib_set_mcast_list,
+       .ndo_get_iflink          = ipoib_get_iflink,
 };
 
 void ipoib_setup(struct net_device *dev)
index 9fad7b5ac8b91910af7e6b1527f20165323e7dc8..4dd1313056a42169ecbe7211bc0fd750d5d9f88e 100644 (file)
@@ -102,7 +102,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
        }
 
        priv->child_type  = type;
-       priv->dev->iflink = ppriv->dev->ifindex;
        list_add_tail(&priv->list, &ppriv->child_intfs);
 
        return 0;
index 1bd15ebc01f2df5002eca38f7089a61701471f5d..27bcdbc950c9fc2df9067504cfe055bf379eee78 100644 (file)
@@ -1154,10 +1154,28 @@ out:
        mutex_unlock(&alps_mutex);
 }
 
-static void alps_report_bare_ps2_packet(struct input_dev *dev,
+static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
                                        unsigned char packet[],
                                        bool report_buttons)
 {
+       struct alps_data *priv = psmouse->private;
+       struct input_dev *dev;
+
+       /* Figure out which device to use to report the bare packet */
+       if (priv->proto_version == ALPS_PROTO_V2 &&
+           (priv->flags & ALPS_DUALPOINT)) {
+               /* On V2 devices the DualPoint Stick reports bare packets */
+               dev = priv->dev2;
+       } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
+               /* Register dev3 mouse if we received PS/2 packet first time */
+               if (!IS_ERR(priv->dev3))
+                       psmouse_queue_work(psmouse, &priv->dev3_register_work,
+                                          0);
+               return;
+       } else {
+               dev = priv->dev3;
+       }
+
        if (report_buttons)
                alps_report_buttons(dev, NULL,
                                packet[0] & 1, packet[0] & 2, packet[0] & 4);
@@ -1232,8 +1250,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
                 * de-synchronization.
                 */
 
-               alps_report_bare_ps2_packet(priv->dev2,
-                                           &psmouse->packet[3], false);
+               alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
+                                           false);
 
                /*
                 * Continue with the standard ALPS protocol handling,
@@ -1289,18 +1307,9 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
         * properly we only do this if the device is fully synchronized.
         */
        if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
-
-               /* Register dev3 mouse if we received PS/2 packet first time */
-               if (unlikely(!priv->dev3))
-                       psmouse_queue_work(psmouse,
-                                          &priv->dev3_register_work, 0);
-
                if (psmouse->pktcnt == 3) {
-                       /* Once dev3 mouse device is registered report data */
-                       if (likely(!IS_ERR_OR_NULL(priv->dev3)))
-                               alps_report_bare_ps2_packet(priv->dev3,
-                                                           psmouse->packet,
-                                                           true);
+                       alps_report_bare_ps2_packet(psmouse, psmouse->packet,
+                                                   true);
                        return PSMOUSE_FULL_PACKET;
                }
                return PSMOUSE_GOOD_DATA;
@@ -2281,10 +2290,12 @@ static int alps_set_protocol(struct psmouse *psmouse,
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-               priv->x_max = 1360;
-               priv->y_max = 660;
                priv->x_bits = 23;
                priv->y_bits = 12;
+
+               if (alps_dolphin_get_device_area(psmouse, priv))
+                       return -EIO;
+
                break;
 
        case ALPS_PROTO_V6:
@@ -2303,9 +2314,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-
-               if (alps_dolphin_get_device_area(psmouse, priv))
-                       return -EIO;
+               priv->x_max = 0xfff;
+               priv->y_max = 0x7ff;
 
                if (priv->fw_ver[1] != 0xba)
                        priv->flags |= ALPS_BUTTONPAD;
index dda605836546847afbdbdd4c77c976134a914a2e..3b06c8a360b661f02ed3aa826e5a996b93e5b358 100644 (file)
@@ -152,6 +152,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                {ANY_BOARD_ID, ANY_BOARD_ID},
                1024, 5022, 2508, 4832
        },
+       {
+               (const char * const []){"LEN2006", NULL},
+               {2691, 2691},
+               1024, 5045, 2457, 4832
+       },
        {
                (const char * const []){"LEN2006", NULL},
                {ANY_BOARD_ID, ANY_BOARD_ID},
@@ -189,7 +194,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN2003",
        "LEN2004", /* L440 */
        "LEN2005",
-       "LEN2006",
+       "LEN2006", /* Edge E440/E540 */
        "LEN2007",
        "LEN2008",
        "LEN2009",
index c8ced12fa45276d365db89427273cc7de17ba322..1cfcea62aed995d701cf2f11bd882d8aae6e1407 100644 (file)
@@ -389,22 +389,49 @@ zsau_resp[] =
        {NULL,                          ZSAU_UNKNOWN}
 };
 
-/* retrieve CID from parsed response
- * returns 0 if no CID, -1 if invalid CID, or CID value 1..65535
+/* check for and remove fixed string prefix
+ * If s starts with prefix terminated by a non-alphanumeric character,
+ * return pointer to the first character after that, otherwise return NULL.
  */
-static int cid_of_response(char *s)
+static char *skip_prefix(char *s, const char *prefix)
 {
-       int cid;
-       int rc;
-
-       if (s[-1] != ';')
-               return 0;       /* no CID separator */
-       rc = kstrtoint(s, 10, &cid);
-       if (rc)
-               return 0;       /* CID not numeric */
-       if (cid < 1 || cid > 65535)
-               return -1;      /* CID out of range */
-       return cid;
+       while (*prefix)
+               if (*s++ != *prefix++)
+                       return NULL;
+       if (isalnum(*s))
+               return NULL;
+       return s;
+}
+
+/* queue event with CID */
+static void add_cid_event(struct cardstate *cs, int cid, int type,
+                         void *ptr, int parameter)
+{
+       unsigned long flags;
+       unsigned next, tail;
+       struct event_t *event;
+
+       gig_dbg(DEBUG_EVENT, "queueing event %d for cid %d", type, cid);
+
+       spin_lock_irqsave(&cs->ev_lock, flags);
+
+       tail = cs->ev_tail;
+       next = (tail + 1) % MAX_EVENTS;
+       if (unlikely(next == cs->ev_head)) {
+               dev_err(cs->dev, "event queue full\n");
+               kfree(ptr);
+       } else {
+               event = cs->events + tail;
+               event->type = type;
+               event->cid = cid;
+               event->ptr = ptr;
+               event->arg = NULL;
+               event->parameter = parameter;
+               event->at_state = NULL;
+               cs->ev_tail = next;
+       }
+
+       spin_unlock_irqrestore(&cs->ev_lock, flags);
 }
 
 /**
@@ -417,190 +444,188 @@ static int cid_of_response(char *s)
  */
 void gigaset_handle_modem_response(struct cardstate *cs)
 {
-       unsigned char *argv[MAX_REC_PARAMS + 1];
-       int params;
-       int i, j;
+       char *eoc, *psep, *ptr;
        const struct resp_type_t *rt;
        const struct zsau_resp_t *zr;
-       int curarg;
-       unsigned long flags;
-       unsigned next, tail, head;
-       struct event_t *event;
-       int resp_code;
-       int param_type;
-       int abort;
-       size_t len;
-       int cid;
-       int rawstring;
-
-       len = cs->cbytes;
-       if (!len) {
+       int cid, parameter;
+       u8 type, value;
+
+       if (!cs->cbytes) {
                /* ignore additional LFs/CRs (M10x config mode or cx100) */
                gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]);
                return;
        }
-       cs->respdata[len] = 0;
-       argv[0] = cs->respdata;
-       params = 1;
+       cs->respdata[cs->cbytes] = 0;
+
        if (cs->at_state.getstring) {
-               /* getstring only allowed without cid at the moment */
+               /* state machine wants next line verbatim */
                cs->at_state.getstring = 0;
-               rawstring = 1;
-               cid = 0;
-       } else {
-               /* parse line */
-               for (i = 0; i < len; i++)
-                       switch (cs->respdata[i]) {
-                       case ';':
-                       case ',':
-                       case '=':
-                               if (params > MAX_REC_PARAMS) {
-                                       dev_warn(cs->dev,
-                                                "too many parameters in response\n");
-                                       /* need last parameter (might be CID) */
-                                       params--;
-                               }
-                               argv[params++] = cs->respdata + i + 1;
-                       }
-
-               rawstring = 0;
-               cid = params > 1 ? cid_of_response(argv[params - 1]) : 0;
-               if (cid < 0) {
-                       gigaset_add_event(cs, &cs->at_state, RSP_INVAL,
-                                         NULL, 0, NULL);
-                       return;
-               }
+               ptr = kstrdup(cs->respdata, GFP_ATOMIC);
+               gig_dbg(DEBUG_EVENT, "string==%s", ptr ? ptr : "NULL");
+               add_cid_event(cs, 0, RSP_STRING, ptr, 0);
+               return;
+       }
 
-               for (j = 1; j < params; ++j)
-                       argv[j][-1] = 0;
+       /* look up response type */
+       for (rt = resp_type; rt->response; ++rt) {
+               eoc = skip_prefix(cs->respdata, rt->response);
+               if (eoc)
+                       break;
+       }
+       if (!rt->response) {
+               add_cid_event(cs, 0, RSP_NONE, NULL, 0);
+               gig_dbg(DEBUG_EVENT, "unknown modem response: '%s'\n",
+                       cs->respdata);
+               return;
+       }
 
-               gig_dbg(DEBUG_EVENT, "CMD received: %s", argv[0]);
-               if (cid) {
-                       --params;
-                       gig_dbg(DEBUG_EVENT, "CID: %s", argv[params]);
-               }
-               gig_dbg(DEBUG_EVENT, "available params: %d", params - 1);
-               for (j = 1; j < params; j++)
-                       gig_dbg(DEBUG_EVENT, "param %d: %s", j, argv[j]);
+       /* check for CID */
+       psep = strrchr(cs->respdata, ';');
+       if (psep &&
+           !kstrtoint(psep + 1, 10, &cid) &&
+           cid >= 1 && cid <= 65535) {
+               /* valid CID: chop it off */
+               *psep = 0;
+       } else {
+               /* no valid CID: leave unchanged */
+               cid = 0;
        }
 
-       spin_lock_irqsave(&cs->ev_lock, flags);
-       head = cs->ev_head;
-       tail = cs->ev_tail;
+       gig_dbg(DEBUG_EVENT, "CMD received: %s", cs->respdata);
+       if (cid)
+               gig_dbg(DEBUG_EVENT, "CID: %d", cid);
 
-       abort = 1;
-       curarg = 0;
-       while (curarg < params) {
-               next = (tail + 1) % MAX_EVENTS;
-               if (unlikely(next == head)) {
-                       dev_err(cs->dev, "event queue full\n");
-                       break;
-               }
+       switch (rt->type) {
+       case RT_NOTHING:
+               /* check parameter separator */
+               if (*eoc)
+                       goto bad_param; /* extra parameter */
 
-               event = cs->events + tail;
-               event->at_state = NULL;
-               event->cid = cid;
-               event->ptr = NULL;
-               event->arg = NULL;
-               tail = next;
+               add_cid_event(cs, cid, rt->resp_code, NULL, 0);
+               break;
 
-               if (rawstring) {
-                       resp_code = RSP_STRING;
-                       param_type = RT_STRING;
-               } else {
-                       for (rt = resp_type; rt->response; ++rt)
-                               if (!strcmp(argv[curarg], rt->response))
+       case RT_RING:
+               /* check parameter separator */
+               if (!*eoc)
+                       eoc = NULL;     /* no parameter */
+               else if (*eoc++ != ',')
+                       goto bad_param;
+
+               add_cid_event(cs, 0, rt->resp_code, NULL, cid);
+
+               /* process parameters as individual responses */
+               while (eoc) {
+                       /* look up parameter type */
+                       psep = NULL;
+                       for (rt = resp_type; rt->response; ++rt) {
+                               psep = skip_prefix(eoc, rt->response);
+                               if (psep)
                                        break;
+                       }
 
-                       if (!rt->response) {
-                               event->type = RSP_NONE;
-                               gig_dbg(DEBUG_EVENT,
-                                       "unknown modem response: '%s'\n",
-                                       argv[curarg]);
-                               break;
+                       /* all legal parameters are of type RT_STRING */
+                       if (!psep || rt->type != RT_STRING) {
+                               dev_warn(cs->dev,
+                                        "illegal RING parameter: '%s'\n",
+                                        eoc);
+                               return;
                        }
 
-                       resp_code = rt->resp_code;
-                       param_type = rt->type;
-                       ++curarg;
-               }
+                       /* skip parameter value separator */
+                       if (*psep++ != '=')
+                               goto bad_param;
 
-               event->type = resp_code;
+                       /* look up end of parameter */
+                       eoc = strchr(psep, ',');
+                       if (eoc)
+                               *eoc++ = 0;
 
-               switch (param_type) {
-               case RT_NOTHING:
-                       break;
-               case RT_RING:
-                       if (!cid) {
-                               dev_err(cs->dev,
-                                       "received RING without CID!\n");
-                               event->type = RSP_INVAL;
-                               abort = 1;
-                       } else {
-                               event->cid = 0;
-                               event->parameter = cid;
-                               abort = 0;
-                       }
+                       /* retrieve parameter value */
+                       ptr = kstrdup(psep, GFP_ATOMIC);
+
+                       /* queue event */
+                       add_cid_event(cs, cid, rt->resp_code, ptr, 0);
+               }
+               break;
+
+       case RT_ZSAU:
+               /* check parameter separator */
+               if (!*eoc) {
+                       /* no parameter */
+                       add_cid_event(cs, cid, rt->resp_code, NULL, ZSAU_NONE);
                        break;
-               case RT_ZSAU:
-                       if (curarg >= params) {
-                               event->parameter = ZSAU_NONE;
+               }
+               if (*eoc++ != '=')
+                       goto bad_param;
+
+               /* look up parameter value */
+               for (zr = zsau_resp; zr->str; ++zr)
+                       if (!strcmp(eoc, zr->str))
                                break;
-                       }
-                       for (zr = zsau_resp; zr->str; ++zr)
-                               if (!strcmp(argv[curarg], zr->str))
-                                       break;
-                       event->parameter = zr->code;
-                       if (!zr->str)
-                               dev_warn(cs->dev,
-                                        "%s: unknown parameter %s after ZSAU\n",
-                                        __func__, argv[curarg]);
-                       ++curarg;
-                       break;
-               case RT_STRING:
-                       if (curarg < params) {
-                               event->ptr = kstrdup(argv[curarg], GFP_ATOMIC);
-                               if (!event->ptr)
-                                       dev_err(cs->dev, "out of memory\n");
-                               ++curarg;
-                       }
-                       gig_dbg(DEBUG_EVENT, "string==%s",
-                               event->ptr ? (char *) event->ptr : "NULL");
-                       break;
-               case RT_ZCAU:
-                       event->parameter = -1;
-                       if (curarg + 1 < params) {
-                               u8 type, value;
-
-                               i = kstrtou8(argv[curarg++], 16, &type);
-                               j = kstrtou8(argv[curarg++], 16, &value);
-                               if (i == 0 && j == 0)
-                                       event->parameter = (type << 8) | value;
-                       } else
-                               curarg = params - 1;
-                       break;
-               case RT_NUMBER:
-                       if (curarg >= params ||
-                           kstrtoint(argv[curarg++], 10, &event->parameter))
-                               event->parameter = -1;
-                       gig_dbg(DEBUG_EVENT, "parameter==%d", event->parameter);
-                       break;
+               if (!zr->str)
+                       goto bad_param;
+
+               add_cid_event(cs, cid, rt->resp_code, NULL, zr->code);
+               break;
+
+       case RT_STRING:
+               /* check parameter separator */
+               if (*eoc++ != '=')
+                       goto bad_param;
+
+               /* retrieve parameter value */
+               ptr = kstrdup(eoc, GFP_ATOMIC);
+
+               /* queue event */
+               add_cid_event(cs, cid, rt->resp_code, ptr, 0);
+               break;
+
+       case RT_ZCAU:
+               /* check parameter separators */
+               if (*eoc++ != '=')
+                       goto bad_param;
+               psep = strchr(eoc, ',');
+               if (!psep)
+                       goto bad_param;
+               *psep++ = 0;
+
+               /* decode parameter values */
+               if (kstrtou8(eoc, 16, &type) || kstrtou8(psep, 16, &value)) {
+                       *--psep = ',';
+                       goto bad_param;
                }
+               parameter = (type << 8) | value;
 
-               if (resp_code == RSP_ZDLE)
-                       cs->dle = event->parameter;
+               add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
+               break;
 
-               if (abort)
-                       break;
-       }
+       case RT_NUMBER:
+               /* check parameter separator */
+               if (*eoc++ != '=')
+                       goto bad_param;
 
-       cs->ev_tail = tail;
-       spin_unlock_irqrestore(&cs->ev_lock, flags);
+               /* decode parameter value */
+               if (kstrtoint(eoc, 10, &parameter))
+                       goto bad_param;
+
+               /* special case ZDLE: set flag before queueing event */
+               if (rt->resp_code == RSP_ZDLE)
+                       cs->dle = parameter;
 
-       if (curarg != params)
-               gig_dbg(DEBUG_EVENT,
-                       "invalid number of processed parameters: %d/%d",
-                       curarg, params);
+               add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
+               break;
+
+bad_param:
+               /* parameter unexpected, incomplete or malformed */
+               dev_warn(cs->dev, "bad parameter in response '%s'\n",
+                        cs->respdata);
+               add_cid_event(cs, cid, rt->resp_code, NULL, -1);
+               break;
+
+       default:
+               dev_err(cs->dev, "%s: internal error on '%s'\n",
+                       __func__, cs->respdata);
+       }
 }
 EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
 
index 94affa5e6f2895725c6f61c9d53b8b988b1a9bca..546b7e81161dd21df253aa7d5f0f41722a30c15e 100644 (file)
@@ -1951,38 +1951,6 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
        return len;
 }
 
-/* We don't need to send arp, because we have point-to-point connections. */
-static int
-isdn_net_rebuild_header(struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-       isdn_net_local *lp = netdev_priv(dev);
-       int ret = 0;
-
-       if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
-               struct ethhdr *eth = (struct ethhdr *) skb->data;
-
-               /*
-                *      Only ARP/IP is currently supported
-                */
-
-               if (eth->h_proto != htons(ETH_P_IP)) {
-                       printk(KERN_WARNING
-                              "isdn_net: %s don't know how to resolve type %d addresses?\n",
-                              dev->name, (int) eth->h_proto);
-                       memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
-                       return 0;
-               }
-               /*
-                *      Try to get ARP to resolve the header.
-                */
-#ifdef CONFIG_INET
-               ret = arp_find(eth->h_dest, skb);
-#endif
-       }
-       return ret;
-}
-
 static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
                             __be16 type)
 {
@@ -2005,7 +1973,6 @@ static void isdn_header_cache_update(struct hh_cache *hh,
 
 static const struct header_ops isdn_header_ops = {
        .create = isdn_net_header,
-       .rebuild = isdn_net_rebuild_header,
        .cache = isdn_header_cache,
        .cache_update = isdn_header_cache_update,
 };
index 84b35925ee4dfba74eecaefe85913ee471a74494..8dc7290089bbc29ab8f2ec752c02dc3dfb73e806 100644 (file)
@@ -112,8 +112,8 @@ mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 }
 
 static int
-mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-                  struct msghdr *msg, size_t len, int flags)
+mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                  int flags)
 {
        struct sk_buff          *skb;
        struct sock             *sk = sock->sk;
@@ -173,8 +173,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 }
 
 static int
-mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-                  struct msghdr *msg, size_t len)
+mISDN_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock             *sk = sock->sk;
        struct sk_buff          *skb;
index 686d3277dad123113d5464b1f00bb4e5b3d8b069..4a77cb02dffc3edeb42cc8cff8c8ad10af38306b 100644 (file)
@@ -1190,7 +1190,6 @@ static int dvb_net_stop(struct net_device *dev)
 static const struct header_ops dvb_header_ops = {
        .create         = eth_header,
        .parse          = eth_header_parse,
-       .rebuild        = eth_rebuild_header,
 };
 
 
index c4cb9a984a5fb3965bba581eab0dd5c9096ec851..40ea639fa413a92f0239e1835894e73d36ddc1c1 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/fcntl.h>
-#include <linux/aio.h>
 #include <linux/ioctl.h>
 #include <linux/cdev.h>
 #include <linux/list.h>
index 3c019c0e60eb859ede0621f26c4d71d72fca8abe..47680c84801c766f158bf65c2e2dd3893fdff300 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/fcntl.h>
-#include <linux/aio.h>
 #include <linux/poll.h>
 #include <linux/init.h>
 #include <linux/ioctl.h>
index bd3039ab8f98e67e86de56ef8429dfd164c89d86..af44ee26075d8b520401a2cc52150b74d45157f0 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/fcntl.h>
-#include <linux/aio.h>
 #include <linux/pci.h>
 #include <linux/poll.h>
 #include <linux/ioctl.h>
index 09de683c167ecf692ac5178644e46b96ac3d7853..10f71c732b5995c9121acf41724848f49994b49f 100644 (file)
@@ -104,7 +104,6 @@ EXPORT_SYMBOL(arcnet_timeout);
 static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
                         unsigned short type, const void *daddr,
                         const void *saddr, unsigned len);
-static int arcnet_rebuild_header(struct sk_buff *skb);
 static int go_tx(struct net_device *dev);
 
 static int debug = ARCNET_DEBUG;
@@ -312,7 +311,6 @@ static int choose_mtu(void)
 
 static const struct header_ops arcnet_header_ops = {
        .create = arcnet_header,
-       .rebuild = arcnet_rebuild_header,
 };
 
 static const struct net_device_ops arcnet_netdev_ops = {
@@ -538,59 +536,6 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
        return proto->build_header(skb, dev, type, _daddr);
 }
 
-
-/* 
- * Rebuild the ARCnet hard header. This is called after an ARP (or in the
- * future other address resolution) has completed on this sk_buff. We now
- * let ARP fill in the destination field.
- */
-static int arcnet_rebuild_header(struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-       struct arcnet_local *lp = netdev_priv(dev);
-       int status = 0;         /* default is failure */
-       unsigned short type;
-       uint8_t daddr=0;
-       struct ArcProto *proto;
-       /*
-        * XXX: Why not use skb->mac_len?
-        */
-       if (skb->network_header - skb->mac_header != 2) {
-               BUGMSG(D_NORMAL,
-                      "rebuild_header: shouldn't be here! (hdrsize=%d)\n",
-                      (int)(skb->network_header - skb->mac_header));
-               return 0;
-       }
-       type = *(uint16_t *) skb_pull(skb, 2);
-       BUGMSG(D_DURING, "rebuild header for protocol %Xh\n", type);
-
-       if (type == ETH_P_IP) {
-#ifdef CONFIG_INET
-               BUGMSG(D_DURING, "rebuild header for ethernet protocol %Xh\n", type);
-               status = arp_find(&daddr, skb) ? 1 : 0;
-               BUGMSG(D_DURING, " rebuilt: dest is %d; protocol %Xh\n",
-                      daddr, type);
-#endif
-       } else {
-               BUGMSG(D_NORMAL,
-                      "I don't understand ethernet protocol %Xh addresses!\n", type);
-               dev->stats.tx_errors++;
-               dev->stats.tx_aborted_errors++;
-       }
-
-       /* if we couldn't resolve the address... give up. */
-       if (!status)
-               return 0;
-
-       /* add the _real_ header this time! */
-       proto = arc_proto_map[lp->default_proto[daddr]];
-       proto->build_header(skb, dev, type, daddr);
-
-       return 1;               /* success */
-}
-
-
-
 /* Called by the kernel in order to transmit a packet. */
 netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
                                     struct net_device *dev)
index cfc4a9c1000abb109e95c3d233405ef57febe5c1..374696de796cd707c99cf150ec12349335d5f958 100644 (file)
@@ -70,6 +70,7 @@
 #define AD_PORT_STANDBY         0x80
 #define AD_PORT_SELECTED        0x100
 #define AD_PORT_MOVED           0x200
+#define AD_PORT_CHURNED         (AD_PORT_ACTOR_CHURN | AD_PORT_PARTNER_CHURN)
 
 /* Port Key definitions
  * key is determined according to the link speed, duplex and
@@ -1013,16 +1014,19 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
        /* check if state machine should change state */
 
        /* first, check if port was reinitialized */
-       if (port->sm_vars & AD_PORT_BEGIN)
+       if (port->sm_vars & AD_PORT_BEGIN) {
                port->sm_rx_state = AD_RX_INITIALIZE;
+               port->sm_vars |= AD_PORT_CHURNED;
        /* check if port is not enabled */
-       else if (!(port->sm_vars & AD_PORT_BEGIN)
+       else if (!(port->sm_vars & AD_PORT_BEGIN)
                 && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED))
                port->sm_rx_state = AD_RX_PORT_DISABLED;
        /* check if new lacpdu arrived */
        else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) ||
                 (port->sm_rx_state == AD_RX_DEFAULTED) ||
                 (port->sm_rx_state == AD_RX_CURRENT))) {
+               if (port->sm_rx_state != AD_RX_CURRENT)
+                       port->sm_vars |= AD_PORT_CHURNED;
                port->sm_rx_timer_counter = 0;
                port->sm_rx_state = AD_RX_CURRENT;
        } else {
@@ -1100,9 +1104,11 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
                         */
                        port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION;
                        port->sm_vars &= ~AD_PORT_MATCHED;
+                       port->partner_oper.port_state |= AD_STATE_LACP_TIMEOUT;
                        port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY;
                        port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
                        port->actor_oper_port_state |= AD_STATE_EXPIRED;
+                       port->sm_vars |= AD_PORT_CHURNED;
                        break;
                case AD_RX_DEFAULTED:
                        __update_default_selected(port);
@@ -1131,6 +1137,45 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
        }
 }
 
+/**
+ * ad_churn_machine - handle port churn's state machine
+ * @port: the port we're looking at
+ *
+ */
+static void ad_churn_machine(struct port *port)
+{
+       if (port->sm_vars & AD_PORT_CHURNED) {
+               port->sm_vars &= ~AD_PORT_CHURNED;
+               port->sm_churn_actor_state = AD_CHURN_MONITOR;
+               port->sm_churn_partner_state = AD_CHURN_MONITOR;
+               port->sm_churn_actor_timer_counter =
+                       __ad_timer_to_ticks(AD_ACTOR_CHURN_TIMER, 0);
+                port->sm_churn_partner_timer_counter =
+                        __ad_timer_to_ticks(AD_PARTNER_CHURN_TIMER, 0);
+               return;
+       }
+       if (port->sm_churn_actor_timer_counter &&
+           !(--port->sm_churn_actor_timer_counter) &&
+           port->sm_churn_actor_state == AD_CHURN_MONITOR) {
+               if (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION) {
+                       port->sm_churn_actor_state = AD_NO_CHURN;
+               } else {
+                       port->churn_actor_count++;
+                       port->sm_churn_actor_state = AD_CHURN;
+               }
+       }
+       if (port->sm_churn_partner_timer_counter &&
+           !(--port->sm_churn_partner_timer_counter) &&
+           port->sm_churn_partner_state == AD_CHURN_MONITOR) {
+               if (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) {
+                       port->sm_churn_partner_state = AD_NO_CHURN;
+               } else {
+                       port->churn_partner_count++;
+                       port->sm_churn_partner_state = AD_CHURN;
+               }
+       }
+}
+
 /**
  * ad_tx_machine - handle a port's tx state machine
  * @port: the port we're looking at
@@ -1731,7 +1776,7 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
 
                port->is_enabled = true;
                /* private parameters */
-               port->sm_vars = 0x3;
+               port->sm_vars = AD_PORT_BEGIN | AD_PORT_LACP_ENABLED;
                port->sm_rx_state = 0;
                port->sm_rx_timer_counter = 0;
                port->sm_periodic_state = 0;
@@ -1745,6 +1790,13 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
                port->next_port_in_aggregator = NULL;
                port->transaction_id = 0;
 
+               port->sm_churn_actor_timer_counter = 0;
+               port->sm_churn_actor_state = 0;
+               port->churn_actor_count = 0;
+               port->sm_churn_partner_timer_counter = 0;
+               port->sm_churn_partner_state = 0;
+               port->churn_partner_count = 0;
+
                memcpy(&port->lacpdu, &lacpdu, sizeof(lacpdu));
        }
 }
@@ -2164,6 +2216,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
                ad_port_selection_logic(port, &update_slave_arr);
                ad_mux_machine(port, &update_slave_arr);
                ad_tx_machine(port);
+               ad_churn_machine(port);
 
                /* turn off the BEGIN bit, since we already handled it */
                if (port->sm_vars & AD_PORT_BEGIN)
@@ -2362,12 +2415,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
                port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS;
                port->actor_oper_port_key = port->actor_admin_port_key |=
                        (__get_link_speed(port) << 1);
+               if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)
+                       port->sm_vars |= AD_PORT_LACP_ENABLED;
        } else {
                /* link has failed */
                port->is_enabled = false;
                port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
                port->actor_oper_port_key = (port->actor_admin_port_key &=
                                             ~AD_SPEED_KEY_MASKS);
+               port->sm_vars &= ~AD_PORT_LACP_ENABLED;
        }
        netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
                   port->actor_port_number,
@@ -2485,6 +2541,9 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
        if (skb->protocol != PKT_TYPE_LACPDU)
                return RX_HANDLER_ANOTHER;
 
+       if (!MAC_ADDRESS_EQUAL(eth_hdr(skb)->h_dest, lacpdu_mcast_addr))
+               return RX_HANDLER_ANOTHER;
+
        lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
        if (!lacpdu)
                return RX_HANDLER_ANOTHER;
index 089a4028859d121d5611d04dde0139196753fb00..78dde56ae6e6fa9dd7d04e64ae1969023d285238 100644 (file)
@@ -928,6 +928,39 @@ static inline void slave_disable_netpoll(struct slave *slave)
 
 static void bond_poll_controller(struct net_device *bond_dev)
 {
+       struct bonding *bond = netdev_priv(bond_dev);
+       struct slave *slave = NULL;
+       struct list_head *iter;
+       struct ad_info ad_info;
+       struct netpoll_info *ni;
+       const struct net_device_ops *ops;
+
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
+               if (bond_3ad_get_active_agg_info(bond, &ad_info))
+                       return;
+
+       rcu_read_lock_bh();
+       bond_for_each_slave_rcu(bond, slave, iter) {
+               ops = slave->dev->netdev_ops;
+               if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
+                       continue;
+
+               if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+                       struct aggregator *agg =
+                           SLAVE_AD_INFO(slave)->port.aggregator;
+
+                       if (agg &&
+                           agg->aggregator_identifier != ad_info.aggregator_id)
+                               continue;
+               }
+
+               ni = rcu_dereference_bh(slave->dev->npinfo);
+               if (down_trylock(&ni->dev_lock))
+                       continue;
+               ops->ndo_poll_controller(slave->dev);
+               up(&ni->dev_lock);
+       }
+       rcu_read_unlock_bh();
 }
 
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
@@ -2900,6 +2933,8 @@ static int bond_slave_netdev_event(unsigned long event,
                        if (old_duplex != slave->duplex)
                                bond_3ad_adapter_duplex_changed(slave);
                }
+               /* Fallthrough */
+       case NETDEV_DOWN:
                /* Refresh slave-array if applicable!
                 * If the setup does not use miimon or arpmon (mode-specific!),
                 * then these events will not cause the slave-array to be
@@ -2911,10 +2946,6 @@ static int bond_slave_netdev_event(unsigned long event,
                if (bond_mode_uses_xmit_hash(bond))
                        bond_update_slave_arr(bond, NULL);
                break;
-       case NETDEV_DOWN:
-               if (bond_mode_uses_xmit_hash(bond))
-                       bond_update_slave_arr(bond, NULL);
-               break;
        case NETDEV_CHANGEMTU:
                /* TODO: Should slaves be allowed to
                 * independently alter their MTU?  For
@@ -4008,6 +4039,7 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_fix_features       = bond_fix_features,
        .ndo_bridge_setlink     = ndo_dflt_netdev_switch_port_bridge_setlink,
        .ndo_bridge_dellink     = ndo_dflt_netdev_switch_port_bridge_dellink,
+       .ndo_features_check     = passthru_features_check,
 };
 
 static const struct device_type bond_type = {
index 976f5ad2a0f2d17975e0523a70c0d497b5a4ee32..62694cfc05b6548aff5c4f7186021f8c2a4ee570 100644 (file)
@@ -176,18 +176,51 @@ static void bond_info_show_slave(struct seq_file *seq,
                   slave->link_failure_count);
 
        seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
+       seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
 
        if (BOND_MODE(bond) == BOND_MODE_8023AD) {
-               const struct aggregator *agg
-                       = SLAVE_AD_INFO(slave)->port.aggregator;
+               const struct port *port = &SLAVE_AD_INFO(slave)->port;
+               const struct aggregator *agg = port->aggregator;
 
-               if (agg)
+               if (agg) {
                        seq_printf(seq, "Aggregator ID: %d\n",
                                   agg->aggregator_identifier);
-               else
+                       seq_printf(seq, "Actor Churn State: %s\n",
+                                  bond_3ad_churn_desc(port->sm_churn_actor_state));
+                       seq_printf(seq, "Partner Churn State: %s\n",
+                                  bond_3ad_churn_desc(port->sm_churn_partner_state));
+                       seq_printf(seq, "Actor Churned Count: %d\n",
+                                  port->churn_actor_count);
+                       seq_printf(seq, "Partner Churned Count: %d\n",
+                                  port->churn_partner_count);
+
+                       seq_puts(seq, "details actor lacp pdu:\n");
+                       seq_printf(seq, "    system priority: %d\n",
+                                  port->actor_system_priority);
+                       seq_printf(seq, "    port key: %d\n",
+                                  port->actor_oper_port_key);
+                       seq_printf(seq, "    port priority: %d\n",
+                                  port->actor_port_priority);
+                       seq_printf(seq, "    port number: %d\n",
+                                  port->actor_port_number);
+                       seq_printf(seq, "    port state: %d\n",
+                                  port->actor_oper_port_state);
+
+                       seq_puts(seq, "details partner lacp pdu:\n");
+                       seq_printf(seq, "    system priority: %d\n",
+                                  port->partner_oper.system_priority);
+                       seq_printf(seq, "    oper key: %d\n",
+                                  port->partner_oper.key);
+                       seq_printf(seq, "    port priority: %d\n",
+                                  port->partner_oper.port_priority);
+                       seq_printf(seq, "    port number: %d\n",
+                                  port->partner_oper.port_number);
+                       seq_printf(seq, "    port state: %d\n",
+                                  port->partner_oper.port_state);
+               } else {
                        seq_puts(seq, "Aggregator ID: N/A\n");
+               }
        }
-       seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
 }
 
 static int bond_info_seq_show(struct seq_file *seq, void *v)
index 27bbc56de15fa37497676f8b389cdc17eced09a0..9da06537237ff220a16b3c5831c728d809d91bb1 100644 (file)
@@ -70,7 +70,6 @@ struct ser_device {
        struct tty_struct *tty;
        bool tx_started;
        unsigned long state;
-       char *tty_name;
 #ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_tty_dir;
        struct debugfs_blob_wrapper tx_blob;
index eeb4b8b6b335c293863f48fe5ac9a92f89f0127c..f4e40aa4d2a21db2be91eb39f46d1e410ac6c0e0 100644 (file)
@@ -291,13 +291,13 @@ static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
 
 static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
 {
-       return __raw_readl(priv->reg_base + reg);
+       return readl_relaxed(priv->reg_base + reg);
 }
 
 static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
                u32 value)
 {
-       __raw_writel(value, priv->reg_base + reg);
+       writel_relaxed(value, priv->reg_base + reg);
 }
 
 static inline void set_mb_mode_prio(const struct at91_priv *priv,
index e7a6363e736b6f2b35256dfd8fb957cf62abdbe0..27ad312e7abf34bdf94a86ce66fa5ae900da1aed 100644 (file)
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 
-#include <asm/bfin_can.h>
 #include <asm/portmux.h>
 
 #define DRV_NAME "bfin_can"
 #define BFIN_CAN_TIMEOUT 100
 #define TX_ECHO_SKB_MAX  1
 
+/* transmit and receive channels */
+#define TRANSMIT_CHL 24
+#define RECEIVE_STD_CHL 0
+#define RECEIVE_EXT_CHL 4
+#define RECEIVE_RTR_CHL 8
+#define RECEIVE_EXT_RTR_CHL 12
+#define MAX_CHL_NUMBER 32
+
+/* All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits.  So use a helper macro to streamline this
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+
+/* bfin can registers layout */
+struct bfin_can_mask_regs {
+       __BFP(aml);
+       __BFP(amh);
+};
+
+struct bfin_can_channel_regs {
+       /* data[0,2,4,6] -> data{0,1,2,3} while data[1,3,5,7] is padding */
+       u16 data[8];
+       __BFP(dlc);
+       __BFP(tsv);
+       __BFP(id0);
+       __BFP(id1);
+};
+
+struct bfin_can_regs {
+       /* global control and status registers */
+       __BFP(mc1);             /* offset 0x00 */
+       __BFP(md1);             /* offset 0x04 */
+       __BFP(trs1);            /* offset 0x08 */
+       __BFP(trr1);            /* offset 0x0c */
+       __BFP(ta1);             /* offset 0x10 */
+       __BFP(aa1);             /* offset 0x14 */
+       __BFP(rmp1);            /* offset 0x18 */
+       __BFP(rml1);            /* offset 0x1c */
+       __BFP(mbtif1);          /* offset 0x20 */
+       __BFP(mbrif1);          /* offset 0x24 */
+       __BFP(mbim1);           /* offset 0x28 */
+       __BFP(rfh1);            /* offset 0x2c */
+       __BFP(opss1);           /* offset 0x30 */
+       u32 __pad1[3];
+       __BFP(mc2);             /* offset 0x40 */
+       __BFP(md2);             /* offset 0x44 */
+       __BFP(trs2);            /* offset 0x48 */
+       __BFP(trr2);            /* offset 0x4c */
+       __BFP(ta2);             /* offset 0x50 */
+       __BFP(aa2);             /* offset 0x54 */
+       __BFP(rmp2);            /* offset 0x58 */
+       __BFP(rml2);            /* offset 0x5c */
+       __BFP(mbtif2);          /* offset 0x60 */
+       __BFP(mbrif2);          /* offset 0x64 */
+       __BFP(mbim2);           /* offset 0x68 */
+       __BFP(rfh2);            /* offset 0x6c */
+       __BFP(opss2);           /* offset 0x70 */
+       u32 __pad2[3];
+       __BFP(clock);           /* offset 0x80 */
+       __BFP(timing);          /* offset 0x84 */
+       __BFP(debug);           /* offset 0x88 */
+       __BFP(status);          /* offset 0x8c */
+       __BFP(cec);             /* offset 0x90 */
+       __BFP(gis);             /* offset 0x94 */
+       __BFP(gim);             /* offset 0x98 */
+       __BFP(gif);             /* offset 0x9c */
+       __BFP(control);         /* offset 0xa0 */
+       __BFP(intr);            /* offset 0xa4 */
+       __BFP(version);         /* offset 0xa8 */
+       __BFP(mbtd);            /* offset 0xac */
+       __BFP(ewr);             /* offset 0xb0 */
+       __BFP(esr);             /* offset 0xb4 */
+       u32 __pad3[2];
+       __BFP(ucreg);           /* offset 0xc0 */
+       __BFP(uccnt);           /* offset 0xc4 */
+       __BFP(ucrc);            /* offset 0xc8 */
+       __BFP(uccnf);           /* offset 0xcc */
+       u32 __pad4[1];
+       __BFP(version2);        /* offset 0xd4 */
+       u32 __pad5[10];
+
+       /* channel(mailbox) mask and message registers */
+       struct bfin_can_mask_regs msk[MAX_CHL_NUMBER];          /* offset 0x100 */
+       struct bfin_can_channel_regs chl[MAX_CHL_NUMBER];       /* offset 0x200 */
+};
+
+#undef __BFP
+
+#define SRS 0x0001             /* Software Reset */
+#define SER 0x0008             /* Stuff Error */
+#define BOIM 0x0008            /* Enable Bus Off Interrupt */
+#define CCR 0x0080             /* CAN Configuration Mode Request */
+#define CCA 0x0080             /* Configuration Mode Acknowledge */
+#define SAM 0x0080             /* Sampling */
+#define AME 0x8000             /* Acceptance Mask Enable */
+#define RMLIM 0x0080           /* Enable RX Message Lost Interrupt */
+#define RMLIS 0x0080           /* RX Message Lost IRQ Status */
+#define RTR 0x4000             /* Remote Frame Transmission Request */
+#define BOIS 0x0008            /* Bus Off IRQ Status */
+#define IDE 0x2000             /* Identifier Extension */
+#define EPIS 0x0004            /* Error-Passive Mode IRQ Status */
+#define EPIM 0x0004            /* Enable Error-Passive Mode Interrupt */
+#define EWTIS 0x0001           /* TX Error Count IRQ Status */
+#define EWRIS 0x0002           /* RX Error Count IRQ Status */
+#define BEF 0x0040             /* Bit Error Flag */
+#define FER 0x0080             /* Form Error Flag */
+#define SMR 0x0020             /* Sleep Mode Request */
+#define SMACK 0x0008           /* Sleep Mode Acknowledge */
+
 /*
  * bfin can private data
  */
@@ -78,8 +186,8 @@ static int bfin_can_set_bittiming(struct net_device *dev)
        if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
                timing |= SAM;
 
-       bfin_write(&reg->clock, clk);
-       bfin_write(&reg->timing, timing);
+       writew(clk, &reg->clock);
+       writew(timing, &reg->timing);
 
        netdev_info(dev, "setting CLOCK=0x%04x TIMING=0x%04x\n", clk, timing);
 
@@ -94,16 +202,14 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
        int i;
 
        /* disable interrupts */
-       bfin_write(&reg->mbim1, 0);
-       bfin_write(&reg->mbim2, 0);
-       bfin_write(&reg->gim, 0);
+       writew(0, &reg->mbim1);
+       writew(0, &reg->mbim2);
+       writew(0, &reg->gim);
 
        /* reset can and enter configuration mode */
-       bfin_write(&reg->control, SRS | CCR);
-       SSYNC();
-       bfin_write(&reg->control, CCR);
-       SSYNC();
-       while (!(bfin_read(&reg->control) & CCA)) {
+       writew(SRS | CCR, &reg->control);
+       writew(CCR, &reg->control);
+       while (!(readw(&reg->control) & CCA)) {
                udelay(10);
                if (--timeout == 0) {
                        netdev_err(dev, "fail to enter configuration mode\n");
@@ -116,34 +222,33 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
         * by writing to CAN Mailbox Configuration Registers 1 and 2
         * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled
         */
-       bfin_write(&reg->mc1, 0);
-       bfin_write(&reg->mc2, 0);
+       writew(0, &reg->mc1);
+       writew(0, &reg->mc2);
 
        /* Set Mailbox Direction */
-       bfin_write(&reg->md1, 0xFFFF);   /* mailbox 1-16 are RX */
-       bfin_write(&reg->md2, 0);   /* mailbox 17-32 are TX */
+       writew(0xFFFF, &reg->md1);   /* mailbox 1-16 are RX */
+       writew(0, &reg->md2);   /* mailbox 17-32 are TX */
 
        /* RECEIVE_STD_CHL */
        for (i = 0; i < 2; i++) {
-               bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
-               bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
-               bfin_write(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
-               bfin_write(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
-               bfin_write(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
+               writew(0, &reg->chl[RECEIVE_STD_CHL + i].id0);
+               writew(AME, &reg->chl[RECEIVE_STD_CHL + i].id1);
+               writew(0, &reg->chl[RECEIVE_STD_CHL + i].dlc);
+               writew(0x1FFF, &reg->msk[RECEIVE_STD_CHL + i].amh);
+               writew(0xFFFF, &reg->msk[RECEIVE_STD_CHL + i].aml);
        }
 
        /* RECEIVE_EXT_CHL */
        for (i = 0; i < 2; i++) {
-               bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
-               bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
-               bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
-               bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
-               bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
+               writew(0, &reg->chl[RECEIVE_EXT_CHL + i].id0);
+               writew(AME | IDE, &reg->chl[RECEIVE_EXT_CHL + i].id1);
+               writew(0, &reg->chl[RECEIVE_EXT_CHL + i].dlc);
+               writew(0x1FFF, &reg->msk[RECEIVE_EXT_CHL + i].amh);
+               writew(0xFFFF, &reg->msk[RECEIVE_EXT_CHL + i].aml);
        }
 
-       bfin_write(&reg->mc2, BIT(TRANSMIT_CHL - 16));
-       bfin_write(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
-       SSYNC();
+       writew(BIT(TRANSMIT_CHL - 16), &reg->mc2);
+       writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), &reg->mc1);
 
        priv->can.state = CAN_STATE_STOPPED;
 }
@@ -157,9 +262,9 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
        /*
         * leave configuration mode
         */
-       bfin_write(&reg->control, bfin_read(&reg->control) & ~CCR);
+       writew(readw(&reg->control) & ~CCR, &reg->control);
 
-       while (bfin_read(&reg->status) & CCA) {
+       while (readw(&reg->status) & CCA) {
                udelay(10);
                if (--timeout == 0) {
                        netdev_err(dev, "fail to leave configuration mode\n");
@@ -170,26 +275,25 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
        /*
         * clear _All_  tx and rx interrupts
         */
-       bfin_write(&reg->mbtif1, 0xFFFF);
-       bfin_write(&reg->mbtif2, 0xFFFF);
-       bfin_write(&reg->mbrif1, 0xFFFF);
-       bfin_write(&reg->mbrif2, 0xFFFF);
+       writew(0xFFFF, &reg->mbtif1);
+       writew(0xFFFF, &reg->mbtif2);
+       writew(0xFFFF, &reg->mbrif1);
+       writew(0xFFFF, &reg->mbrif2);
 
        /*
         * clear global interrupt status register
         */
-       bfin_write(&reg->gis, 0x7FF); /* overwrites with '1' */
+       writew(0x7FF, &reg->gis); /* overwrites with '1' */
 
        /*
         * Initialize Interrupts
         * - set bits in the mailbox interrupt mask register
         * - global interrupt mask
         */
-       bfin_write(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
-       bfin_write(&reg->mbim2, BIT(TRANSMIT_CHL - 16));
+       writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), &reg->mbim1);
+       writew(BIT(TRANSMIT_CHL - 16), &reg->mbim2);
 
-       bfin_write(&reg->gim, EPIM | BOIM | RMLIM);
-       SSYNC();
+       writew(EPIM | BOIM | RMLIM, &reg->gim);
 }
 
 static void bfin_can_start(struct net_device *dev)
@@ -226,7 +330,7 @@ static int bfin_can_get_berr_counter(const struct net_device *dev,
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;
 
-       u16 cec = bfin_read(&reg->cec);
+       u16 cec = readw(&reg->cec);
 
        bec->txerr = cec >> 8;
        bec->rxerr = cec;
@@ -252,28 +356,28 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* fill id */
        if (id & CAN_EFF_FLAG) {
-               bfin_write(&reg->chl[TRANSMIT_CHL].id0, id);
+               writew(id, &reg->chl[TRANSMIT_CHL].id0);
                val = ((id & 0x1FFF0000) >> 16) | IDE;
        } else
                val = (id << 2);
        if (id & CAN_RTR_FLAG)
                val |= RTR;
-       bfin_write(&reg->chl[TRANSMIT_CHL].id1, val | AME);
+       writew(val | AME, &reg->chl[TRANSMIT_CHL].id1);
 
        /* fill payload */
        for (i = 0; i < 8; i += 2) {
                val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
                        ((6 - i) < dlc ? (data[6 - i] << 8) : 0);
-               bfin_write(&reg->chl[TRANSMIT_CHL].data[i], val);
+               writew(val, &reg->chl[TRANSMIT_CHL].data[i]);
        }
 
        /* fill data length code */
-       bfin_write(&reg->chl[TRANSMIT_CHL].dlc, dlc);
+       writew(dlc, &reg->chl[TRANSMIT_CHL].dlc);
 
        can_put_echo_skb(skb, dev, 0);
 
        /* set transmit request */
-       bfin_write(&reg->trs2, BIT(TRANSMIT_CHL - 16));
+       writew(BIT(TRANSMIT_CHL - 16), &reg->trs2);
 
        return 0;
 }
@@ -296,26 +400,26 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
        /* get id */
        if (isrc & BIT(RECEIVE_EXT_CHL)) {
                /* extended frame format (EFF) */
-               cf->can_id = ((bfin_read(&reg->chl[RECEIVE_EXT_CHL].id1)
+               cf->can_id = ((readw(&reg->chl[RECEIVE_EXT_CHL].id1)
                             & 0x1FFF) << 16)
-                            + bfin_read(&reg->chl[RECEIVE_EXT_CHL].id0);
+                            + readw(&reg->chl[RECEIVE_EXT_CHL].id0);
                cf->can_id |= CAN_EFF_FLAG;
                obj = RECEIVE_EXT_CHL;
        } else {
                /* standard frame format (SFF) */
-               cf->can_id = (bfin_read(&reg->chl[RECEIVE_STD_CHL].id1)
+               cf->can_id = (readw(&reg->chl[RECEIVE_STD_CHL].id1)
                             & 0x1ffc) >> 2;
                obj = RECEIVE_STD_CHL;
        }
-       if (bfin_read(&reg->chl[obj].id1) & RTR)
+       if (readw(&reg->chl[obj].id1) & RTR)
                cf->can_id |= CAN_RTR_FLAG;
 
        /* get data length code */
-       cf->can_dlc = get_can_dlc(bfin_read(&reg->chl[obj].dlc) & 0xF);
+       cf->can_dlc = get_can_dlc(readw(&reg->chl[obj].dlc) & 0xF);
 
        /* get payload */
        for (i = 0; i < 8; i += 2) {
-               val = bfin_read(&reg->chl[obj].data[i]);
+               val = readw(&reg->chl[obj].data[i]);
                cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
                cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
        }
@@ -369,7 +473,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
 
        if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
                                state == CAN_STATE_ERROR_PASSIVE)) {
-               u16 cec = bfin_read(&reg->cec);
+               u16 cec = readw(&reg->cec);
                u8 rxerr = cec;
                u8 txerr = cec >> 8;
 
@@ -420,23 +524,23 @@ static irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
        struct net_device_stats *stats = &dev->stats;
        u16 status, isrc;
 
-       if ((irq == priv->tx_irq) && bfin_read(&reg->mbtif2)) {
+       if ((irq == priv->tx_irq) && readw(&reg->mbtif2)) {
                /* transmission complete interrupt */
-               bfin_write(&reg->mbtif2, 0xFFFF);
+               writew(0xFFFF, &reg->mbtif2);
                stats->tx_packets++;
-               stats->tx_bytes += bfin_read(&reg->chl[TRANSMIT_CHL].dlc);
+               stats->tx_bytes += readw(&reg->chl[TRANSMIT_CHL].dlc);
                can_get_echo_skb(dev, 0);
                netif_wake_queue(dev);
-       } else if ((irq == priv->rx_irq) && bfin_read(&reg->mbrif1)) {
+       } else if ((irq == priv->rx_irq) && readw(&reg->mbrif1)) {
                /* receive interrupt */
-               isrc = bfin_read(&reg->mbrif1);
-               bfin_write(&reg->mbrif1, 0xFFFF);
+               isrc = readw(&reg->mbrif1);
+               writew(0xFFFF, &reg->mbrif1);
                bfin_can_rx(dev, isrc);
-       } else if ((irq == priv->err_irq) && bfin_read(&reg->gis)) {
+       } else if ((irq == priv->err_irq) && readw(&reg->gis)) {
                /* error interrupt */
-               isrc = bfin_read(&reg->gis);
-               status = bfin_read(&reg->esr);
-               bfin_write(&reg->gis, 0x7FF);
+               isrc = readw(&reg->gis);
+               status = readw(&reg->esr);
+               writew(0x7FF, &reg->gis);
                bfin_can_err(dev, isrc, status);
        } else {
                return IRQ_NONE;
@@ -556,16 +660,10 @@ static int bfin_can_probe(struct platform_device *pdev)
                goto exit;
        }
 
-       if (!request_mem_region(res_mem->start, resource_size(res_mem),
-                               dev_name(&pdev->dev))) {
-               err = -EBUSY;
-               goto exit;
-       }
-
        /* request peripheral pins */
        err = peripheral_request_list(pdata, dev_name(&pdev->dev));
        if (err)
-               goto exit_mem_release;
+               goto exit;
 
        dev = alloc_bfin_candev();
        if (!dev) {
@@ -574,7 +672,13 @@ static int bfin_can_probe(struct platform_device *pdev)
        }
 
        priv = netdev_priv(dev);
-       priv->membase = (void __iomem *)res_mem->start;
+
+       priv->membase = devm_ioremap_resource(&pdev->dev, res_mem);
+       if (IS_ERR(priv->membase)) {
+               err = PTR_ERR(priv->membase);
+               goto exit_peri_pin_free;
+       }
+
        priv->rx_irq = rx_irq->start;
        priv->tx_irq = tx_irq->start;
        priv->err_irq = err_irq->start;
@@ -606,8 +710,6 @@ exit_candev_free:
        free_candev(dev);
 exit_peri_pin_free:
        peripheral_free_list(pdata);
-exit_mem_release:
-       release_mem_region(res_mem->start, resource_size(res_mem));
 exit:
        return err;
 }
@@ -616,15 +718,11 @@ static int bfin_can_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
        struct bfin_can_priv *priv = netdev_priv(dev);
-       struct resource *res;
 
        bfin_can_set_reset_mode(dev);
 
        unregister_candev(dev);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-
        peripheral_free_list(priv->pin_list);
 
        free_candev(dev);
@@ -641,9 +739,8 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
 
        if (netif_running(dev)) {
                /* enter sleep mode */
-               bfin_write(&reg->control, bfin_read(&reg->control) | SMR);
-               SSYNC();
-               while (!(bfin_read(&reg->intr) & SMACK)) {
+               writew(readw(&reg->control) | SMR, &reg->control);
+               while (!(readw(&reg->intr) & SMACK)) {
                        udelay(10);
                        if (--timeout == 0) {
                                netdev_err(dev, "fail to enter sleep mode\n");
@@ -663,8 +760,7 @@ static int bfin_can_resume(struct platform_device *pdev)
 
        if (netif_running(dev)) {
                /* leave sleep mode */
-               bfin_write(&reg->intr, 0);
-               SSYNC();
+               writew(0, &reg->intr);
        }
 
        return 0;
index b1e8851d3cc4cae2b35d2d853f48866bad1922cd..866e5e12fdd27c5f2c73436df486540293f85f2e 100644 (file)
@@ -254,7 +254,7 @@ static int cc770_platform_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id cc770_platform_table[] = {
+static const struct of_device_id cc770_platform_table[] = {
        {.compatible = "bosch,cc770"}, /* CC770 from Bosch */
        {.compatible = "intc,82527"},  /* AN82527 from Intel CP */
        {},
index fed1bbd0b0d29ec45fd3ece838c8a093bb46fc9b..e3d7e22a4fa080504544a245d9e21adb80aea036 100644 (file)
@@ -1725,7 +1725,7 @@ static int grcan_remove(struct platform_device *ofdev)
        return 0;
 }
 
-static struct of_device_id grcan_match[] = {
+static const struct of_device_id grcan_match[] = {
        {.name = "GAISLER_GRCAN"},
        {.name = "01_03d"},
        {.name = "GAISLER_GRHCAN"},
index ab7f1b01be499a50266d5998c404f15b06cbdad6..c1b667675fa1becc2e9e190c6423eb7f3c10b181 100644 (file)
@@ -30,20 +30,28 @@ void can_led_event(struct net_device *netdev, enum can_led_event event)
        case CAN_LED_EVENT_OPEN:
                led_trigger_event(priv->tx_led_trig, LED_FULL);
                led_trigger_event(priv->rx_led_trig, LED_FULL);
+               led_trigger_event(priv->rxtx_led_trig, LED_FULL);
                break;
        case CAN_LED_EVENT_STOP:
                led_trigger_event(priv->tx_led_trig, LED_OFF);
                led_trigger_event(priv->rx_led_trig, LED_OFF);
+               led_trigger_event(priv->rxtx_led_trig, LED_OFF);
                break;
        case CAN_LED_EVENT_TX:
-               if (led_delay)
+               if (led_delay) {
                        led_trigger_blink_oneshot(priv->tx_led_trig,
                                                  &led_delay, &led_delay, 1);
+                       led_trigger_blink_oneshot(priv->rxtx_led_trig,
+                                                 &led_delay, &led_delay, 1);
+               }
                break;
        case CAN_LED_EVENT_RX:
-               if (led_delay)
+               if (led_delay) {
                        led_trigger_blink_oneshot(priv->rx_led_trig,
                                                  &led_delay, &led_delay, 1);
+                       led_trigger_blink_oneshot(priv->rxtx_led_trig,
+                                                 &led_delay, &led_delay, 1);
+               }
                break;
        }
 }
@@ -55,6 +63,7 @@ static void can_led_release(struct device *gendev, void *res)
 
        led_trigger_unregister_simple(priv->tx_led_trig);
        led_trigger_unregister_simple(priv->rx_led_trig);
+       led_trigger_unregister_simple(priv->rxtx_led_trig);
 }
 
 /* Register CAN LED triggers for a CAN device
@@ -76,11 +85,15 @@ void devm_can_led_init(struct net_device *netdev)
                 "%s-tx", netdev->name);
        snprintf(priv->rx_led_trig_name, sizeof(priv->rx_led_trig_name),
                 "%s-rx", netdev->name);
+       snprintf(priv->rxtx_led_trig_name, sizeof(priv->rxtx_led_trig_name),
+                "%s-rxtx", netdev->name);
 
        led_trigger_register_simple(priv->tx_led_trig_name,
                                    &priv->tx_led_trig);
        led_trigger_register_simple(priv->rx_led_trig_name,
                                    &priv->rx_led_trig);
+       led_trigger_register_simple(priv->rxtx_led_trig_name,
+                                   &priv->rxtx_led_trig);
 
        devres_add(&netdev->dev, res);
 }
@@ -97,7 +110,7 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
        if (!priv)
                return NOTIFY_DONE;
 
-       if (!priv->tx_led_trig || !priv->rx_led_trig)
+       if (!priv->tx_led_trig || !priv->rx_led_trig || !priv->rxtx_led_trig)
                return NOTIFY_DONE;
 
        if (msg == NETDEV_CHANGENAME) {
@@ -106,6 +119,9 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
 
                snprintf(name, sizeof(name), "%s-rx", netdev->name);
                led_trigger_rename_static(name, priv->rx_led_trig);
+
+               snprintf(name, sizeof(name), "%s-rxtx", netdev->name);
+               led_trigger_rename_static(name, priv->rxtx_led_trig);
        }
 
        return NOTIFY_DONE;
index 2e04b3aeeb374101b54cfe4dc0506df3a1f3420d..ef655177bb5e5b62a7be1f821d08a88226ba7b84 100644 (file)
@@ -312,8 +312,8 @@ static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
 static inline void m_can_fifo_write(const struct m_can_priv *priv,
                                    u32 fpi, unsigned int offset, u32 val)
 {
-       return writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
-                     fpi * TXB_ELEMENT_SIZE + offset);
+       writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
+              fpi * TXB_ELEMENT_SIZE + offset);
 }
 
 static inline void m_can_config_endisable(const struct m_can_priv *priv,
index ad024e60ba8c332e1a42f58fa3b1f1d25a3523d9..c7427bdd3a4bff957aaef3fdb1d2f8ed0ead41cb 100644 (file)
@@ -43,7 +43,7 @@ struct mpc5xxx_can_data {
 };
 
 #ifdef CONFIG_PPC_MPC52xx
-static struct of_device_id mpc52xx_cdm_ids[] = {
+static const struct of_device_id mpc52xx_cdm_ids[] = {
        { .compatible = "fsl,mpc5200-cdm", },
        {}
 };
index 93115250eaf527a0206c2981580ff3c89ffd6473..0552ed46a206c5af03785bb63adbf3c2b0b43f69 100644 (file)
@@ -242,7 +242,7 @@ static int sp_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id sp_of_table[] = {
+static const struct of_device_id sp_of_table[] = {
        {.compatible = "nxp,sja1000"},
        {},
 };
index 9376f5e5b94ed2956c85808c5642ccbdb34036bd..866bac0ae7e966855d1085f5a8735988832a1bb7 100644 (file)
@@ -123,7 +123,7 @@ MODULE_LICENSE("GPL v2");
  * CPC_MSG_TYPE_EXT_CAN_FRAME or CPC_MSG_TYPE_EXT_RTR_FRAME.
  */
 struct cpc_can_msg {
-       u32 id;
+       __le32 id;
        u8 length;
        u8 msg[8];
 };
@@ -200,8 +200,8 @@ struct __packed ems_cpc_msg {
        u8 type;        /* type of message */
        u8 length;      /* length of data within union 'msg' */
        u8 msgid;       /* confirmation handle */
-       u32 ts_sec;     /* timestamp in seconds */
-       u32 ts_nsec;    /* timestamp in nano seconds */
+       __le32 ts_sec;  /* timestamp in seconds */
+       __le32 ts_nsec; /* timestamp in nano seconds */
 
        union {
                u8 generic[64];
@@ -765,7 +765,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
 
        msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];
 
-       msg->msg.can_msg.id = cf->can_id & CAN_ERR_MASK;
+       msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
        msg->msg.can_msg.length = cf->can_dlc;
 
        if (cf->can_id & CAN_RTR_FLAG) {
@@ -783,9 +783,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
                msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
        }
 
-       /* Respect byte order */
-       msg->msg.can_msg.id = cpu_to_le32(msg->msg.can_msg.id);
-
        for (i = 0; i < MAX_TX_URBS; i++) {
                if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
                        context = &dev->tx_contexts[i];
index bacca0bd89c1ffc515bb352b73d4801de30c9f4a..411c1af92c62c1c93985e4adf7232d1b6b78d8bc 100644 (file)
@@ -139,7 +139,7 @@ struct tx_msg {
        u8 cmd;
        u8 net;
        u8 dlc;
-       __le32 hnd;
+       u32 hnd;        /* opaque handle, not used by device */
        __le32 id; /* upper 3 bits contain flags */
        u8 data[8];
 };
@@ -149,7 +149,7 @@ struct tx_done_msg {
        u8 cmd;
        u8 net;
        u8 status;
-       __le32 hnd;
+       u32 hnd;        /* opaque handle, not used by device */
        __le32 ts;
 };
 
index 57611fd91229a4580987b3519d56b8913f185411..4643914859b2c7894f7556cfe023e1903eeb9cc6 100644 (file)
@@ -1867,7 +1867,7 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
                if (!dev->nets[i])
                        continue;
 
-               unregister_netdev(dev->nets[i]->netdev);
+               unregister_candev(dev->nets[i]->netdev);
        }
 
        kvaser_usb_unlink_all_urbs(dev);
index a9221ad9f1a0a0387de1ebf8470c0d360e895773..09d14e70abd746b7c17ff0cac6ee6832e9d8771f 100644 (file)
@@ -182,7 +182,7 @@ static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev)
 static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
 {
        void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
-       int err;
+       int err = 0;
        u8 *packet_ptr;
        int i, n = 1, packet_len;
        ptrdiff_t cmd_len;
index 6c67643122859f1a37c0eb953d2d5a506f7e3c47..6bddfe062b516467b6cc2be75a53c17c2040b2d7 100644 (file)
@@ -1185,7 +1185,7 @@ static int xcan_remove(struct platform_device *pdev)
 }
 
 /* Match table for OF platform binding */
-static struct of_device_id xcan_of_match[] = {
+static const struct of_device_id xcan_of_match[] = {
        { .compatible = "xlnx,zynq-can-1.0", },
        { .compatible = "xlnx,axi-can-1.00.a", },
        { /* end of list */ },
index 48e62a34f7f27e6a8d4e1669f24c7bfef9709b09..18550c7ebe6f1beb225ba9c56717d14aacf45896 100644 (file)
@@ -7,7 +7,7 @@ config NET_DSA_MV88E6XXX
 
 config NET_DSA_MV88E6060
        tristate "Marvell 88E6060 ethernet switch chip support"
-       select NET_DSA
+       depends on NET_DSA
        select NET_DSA_TAG_TRAILER
        ---help---
          This enables support for the Marvell 88E6060 ethernet switch
@@ -19,7 +19,7 @@ config NET_DSA_MV88E6XXX_NEED_PPU
 
 config NET_DSA_MV88E6131
        tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
-       select NET_DSA
+       depends on NET_DSA
        select NET_DSA_MV88E6XXX
        select NET_DSA_MV88E6XXX_NEED_PPU
        select NET_DSA_TAG_DSA
@@ -29,7 +29,7 @@ config NET_DSA_MV88E6131
 
 config NET_DSA_MV88E6123_61_65
        tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
-       select NET_DSA
+       depends on NET_DSA
        select NET_DSA_MV88E6XXX
        select NET_DSA_TAG_EDSA
        ---help---
@@ -38,7 +38,7 @@ config NET_DSA_MV88E6123_61_65
 
 config NET_DSA_MV88E6171
        tristate "Marvell 88E6171/6172 ethernet switch chip support"
-       select NET_DSA
+       depends on NET_DSA
        select NET_DSA_MV88E6XXX
        select NET_DSA_TAG_EDSA
        ---help---
@@ -47,7 +47,7 @@ config NET_DSA_MV88E6171
 
 config NET_DSA_MV88E6352
        tristate "Marvell 88E6176/88E6352 ethernet switch chip support"
-       select NET_DSA
+       depends on NET_DSA
        select NET_DSA_MV88E6XXX
        select NET_DSA_TAG_EDSA
        ---help---
@@ -56,8 +56,7 @@ config NET_DSA_MV88E6352
 
 config NET_DSA_BCM_SF2
        tristate "Broadcom Starfighter 2 Ethernet switch support"
-       depends on HAS_IOMEM
-       select NET_DSA
+       depends on HAS_IOMEM && NET_DSA
        select NET_DSA_TAG_BRCM
        select FIXED_PHY
        select BCM7XXX_PHY
index 4daffb2849319df132eccb5068a9164fe60b916d..cedb572bf25af58d202a8550b003753f00219f3b 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/of_address.h>
 #include <net/dsa.h>
 #include <linux/ethtool.h>
+#include <linux/if_bridge.h>
 
 #include "bcm_sf2.h"
 #include "bcm_sf2_regs.h"
@@ -299,10 +300,14 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
        if (port == 7)
                intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
 
-       /* Set this port, and only this one to be in the default VLAN */
+       /* Set this port, and only this one to be in the default VLAN,
+        * if member of a bridge, restore its membership prior to
+        * bringing down this port.
+        */
        reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
        reg &= ~PORT_VLAN_CTRL_MASK;
        reg |= (1 << port);
+       reg |= priv->port_sts[port].vlan_ctl_mask;
        core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));
 
        bcm_sf2_imp_vlan_setup(ds, cpu_port);
@@ -400,6 +405,151 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
        return 0;
 }
 
+/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
+ * flush for that port.
+ */
+static int bcm_sf2_sw_fast_age_port(struct dsa_switch  *ds, int port)
+{
+       struct bcm_sf2_priv *priv = ds_to_priv(ds);
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       core_writel(priv, port, CORE_FAST_AGE_PORT);
+
+       reg = core_readl(priv, CORE_FAST_AGE_CTRL);
+       reg |= EN_AGE_PORT | FAST_AGE_STR_DONE;
+       core_writel(priv, reg, CORE_FAST_AGE_CTRL);
+
+       do {
+               reg = core_readl(priv, CORE_FAST_AGE_CTRL);
+               if (!(reg & FAST_AGE_STR_DONE))
+                       break;
+
+               cpu_relax();
+       } while (timeout--);
+
+       if (!timeout)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
+                             u32 br_port_mask)
+{
+       struct bcm_sf2_priv *priv = ds_to_priv(ds);
+       unsigned int i;
+       u32 reg, p_ctl;
+
+       p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
+
+       for (i = 0; i < priv->hw_params.num_ports; i++) {
+               if (!((1 << i) & br_port_mask))
+                       continue;
+
+               /* Add this local port to the remote port VLAN control
+                * membership and update the remote port bitmask
+                */
+               reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
+               reg |= 1 << port;
+               core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
+               priv->port_sts[i].vlan_ctl_mask = reg;
+
+               p_ctl |= 1 << i;
+       }
+
+       /* Configure the local port VLAN control membership to include
+        * remote ports and update the local port bitmask
+        */
+       core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
+       priv->port_sts[port].vlan_ctl_mask = p_ctl;
+
+       return 0;
+}
+
+static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port,
+                              u32 br_port_mask)
+{
+       struct bcm_sf2_priv *priv = ds_to_priv(ds);
+       unsigned int i;
+       u32 reg, p_ctl;
+
+       p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
+
+       for (i = 0; i < priv->hw_params.num_ports; i++) {
+               /* Don't touch the remaining ports */
+               if (!((1 << i) & br_port_mask))
+                       continue;
+
+               reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
+               reg &= ~(1 << port);
+               core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
+               priv->port_sts[port].vlan_ctl_mask = reg;
+
+               /* Prevent self removal to preserve isolation */
+               if (port != i)
+                       p_ctl &= ~(1 << i);
+       }
+
+       core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
+       priv->port_sts[port].vlan_ctl_mask = p_ctl;
+
+       return 0;
+}
+
+static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
+                                      u8 state)
+{
+       struct bcm_sf2_priv *priv = ds_to_priv(ds);
+       u8 hw_state, cur_hw_state;
+       int ret = 0;
+       u32 reg;
+
+       reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+       cur_hw_state = reg >> G_MISTP_STATE_SHIFT;
+
+       switch (state) {
+       case BR_STATE_DISABLED:
+               hw_state = G_MISTP_DIS_STATE;
+               break;
+       case BR_STATE_LISTENING:
+               hw_state = G_MISTP_LISTEN_STATE;
+               break;
+       case BR_STATE_LEARNING:
+               hw_state = G_MISTP_LEARN_STATE;
+               break;
+       case BR_STATE_FORWARDING:
+               hw_state = G_MISTP_FWD_STATE;
+               break;
+       case BR_STATE_BLOCKING:
+               hw_state = G_MISTP_BLOCK_STATE;
+               break;
+       default:
+               pr_err("%s: invalid STP state: %d\n", __func__, state);
+               return -EINVAL;
+       }
+
+       /* Fast-age ARL entries if we are moving a port from Learning or
+        * Forwarding state to Disabled, Blocking or Listening state
+        */
+       if (cur_hw_state != hw_state) {
+               if (cur_hw_state & 4 && !(hw_state & 4)) {
+                       ret = bcm_sf2_sw_fast_age_port(ds, port);
+                       if (ret) {
+                               pr_err("%s: fast-ageing failed\n", __func__);
+                               return ret;
+                       }
+               }
+       }
+
+       reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+       reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
+       reg |= hw_state;
+       core_writel(priv, reg, CORE_G_PCTL_PORT(port));
+
+       return 0;
+}
+
 static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
 {
        struct bcm_sf2_priv *priv = dev_id;
@@ -916,6 +1066,9 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = {
        .port_disable           = bcm_sf2_port_disable,
        .get_eee                = bcm_sf2_sw_get_eee,
        .set_eee                = bcm_sf2_sw_set_eee,
+       .port_join_bridge       = bcm_sf2_sw_br_join,
+       .port_leave_bridge      = bcm_sf2_sw_br_leave,
+       .port_stp_update        = bcm_sf2_sw_br_set_stp_state,
 };
 
 static int __init bcm_sf2_init(void)
index 7b7053d3c5fad20e07a75eb5ea987743b51484ba..22e2ebf313332f4dd004162b31faf288d7f7ab25 100644 (file)
@@ -46,6 +46,8 @@ struct bcm_sf2_port_status {
        unsigned int link;
 
        struct ethtool_eee eee;
+
+       u32 vlan_ctl_mask;
 };
 
 struct bcm_sf2_priv {
index cabdfa5e217af7fcb4d7dbfb37a141a987d6fd57..fa4e6e78c9ea75526bec5d6ed7356053753fb6cb 100644 (file)
 #define  EN_CHIP_RST                   (1 << 6)
 #define  EN_SW_RESET                   (1 << 4)
 
+#define CORE_FAST_AGE_CTRL             0x00220
+#define  EN_FAST_AGE_STATIC            (1 << 0)
+#define  EN_AGE_DYNAMIC                        (1 << 1)
+#define  EN_AGE_PORT                   (1 << 2)
+#define  EN_AGE_VLAN                   (1 << 3)
+#define  EN_AGE_SPT                    (1 << 4)
+#define  EN_AGE_MCAST                  (1 << 5)
+#define  FAST_AGE_STR_DONE             (1 << 7)
+
+#define CORE_FAST_AGE_PORT             0x00224
+#define  AGE_PORT_MASK                 0xf
+
+#define CORE_FAST_AGE_VID              0x00228
+#define  AGE_VID_MASK                  0x3fff
+
 #define CORE_LNKSTS                    0x00400
 #define  LNK_STS_MASK                  0x1ff
 
index e9c736e1cef3a20cd599f6347be20de73127a0ae..b4af6d5aff7cc970773f55c3f9a8fedbe3c06f1f 100644 (file)
@@ -25,66 +25,33 @@ static char *mv88e6123_61_65_probe(struct device *host_dev, int sw_addr)
        if (bus == NULL)
                return NULL;
 
-       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
        if (ret >= 0) {
-               if (ret == 0x1212)
+               if (ret == PORT_SWITCH_ID_6123_A1)
                        return "Marvell 88E6123 (A1)";
-               if (ret == 0x1213)
+               if (ret == PORT_SWITCH_ID_6123_A2)
                        return "Marvell 88E6123 (A2)";
-               if ((ret & 0xfff0) == 0x1210)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6123)
                        return "Marvell 88E6123";
 
-               if (ret == 0x1612)
+               if (ret == PORT_SWITCH_ID_6161_A1)
                        return "Marvell 88E6161 (A1)";
-               if (ret == 0x1613)
+               if (ret == PORT_SWITCH_ID_6161_A2)
                        return "Marvell 88E6161 (A2)";
-               if ((ret & 0xfff0) == 0x1610)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6161)
                        return "Marvell 88E6161";
 
-               if (ret == 0x1652)
+               if (ret == PORT_SWITCH_ID_6165_A1)
                        return "Marvell 88E6165 (A1)";
-               if (ret == 0x1653)
+               if (ret == PORT_SWITCH_ID_6165_A2)
                        return "Marvell 88e6165 (A2)";
-               if ((ret & 0xfff0) == 0x1650)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6165)
                        return "Marvell 88E6165";
        }
 
        return NULL;
 }
 
-static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds)
-{
-       int i;
-       int ret;
-       unsigned long timeout;
-
-       /* Set all ports to the disabled state. */
-       for (i = 0; i < 8; i++) {
-               ret = REG_READ(REG_PORT(i), 0x04);
-               REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
-       }
-
-       /* Wait for transmit queues to drain. */
-       usleep_range(2000, 4000);
-
-       /* Reset the switch. */
-       REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
-
-       /* Wait up to one second for reset to complete. */
-       timeout = jiffies + 1 * HZ;
-       while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
-               if ((ret & 0xc800) == 0xc800)
-                       break;
-
-               usleep_range(1000, 2000);
-       }
-       if (time_after(jiffies, timeout))
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
 static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
 {
        int ret;
@@ -222,28 +189,6 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
                val |= 0x000c;
        REG_WRITE(addr, 0x04, val);
 
-       /* Port Control 1: disable trunking.  Also, if this is the
-        * CPU port, enable learn messages to be sent to this port.
-        */
-       REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
-
-       /* Port based VLAN map: give each port its own address
-        * database, allow the CPU port to talk to each of the 'real'
-        * ports, and allow each of the 'real' ports to only talk to
-        * the upstream port.
-        */
-       val = (p & 0xf) << 12;
-       if (dsa_is_cpu_port(ds, p))
-               val |= ds->phys_port_mask;
-       else
-               val |= 1 << dsa_upstream_port(ds);
-       REG_WRITE(addr, 0x06, val);
-
-       /* Default VLAN ID and priority: don't set a default VLAN
-        * ID, and set the default packet priority to zero.
-        */
-       REG_WRITE(addr, 0x07, 0x0000);
-
        /* Port Control 2: don't force a good FCS, set the maximum
         * frame size to 10240 bytes, don't let the switch add or
         * strip 802.1q tags, don't discard tagged or untagged frames
@@ -288,7 +233,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
         */
        REG_WRITE(addr, 0x19, 0x7654);
 
-       return 0;
+       return mv88e6xxx_setup_port_common(ds, p);
 }
 
 static int mv88e6123_61_65_setup(struct dsa_switch *ds)
@@ -297,11 +242,23 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
        int i;
        int ret;
 
-       mutex_init(&ps->smi_mutex);
-       mutex_init(&ps->stats_mutex);
-       mutex_init(&ps->phy_mutex);
+       ret = mv88e6xxx_setup_common(ds);
+       if (ret < 0)
+               return ret;
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6123:
+               ps->num_ports = 3;
+               break;
+       case PORT_SWITCH_ID_6161:
+       case PORT_SWITCH_ID_6165:
+               ps->num_ports = 6;
+               break;
+       default:
+               return -ENODEV;
+       }
 
-       ret = mv88e6123_61_65_switch_reset(ds);
+       ret = mv88e6xxx_switch_reset(ds, false);
        if (ret < 0)
                return ret;
 
@@ -311,7 +268,7 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < 6; i++) {
+       for (i = 0; i < ps->num_ports; i++) {
                ret = mv88e6123_61_65_setup_port(ds, i);
                if (ret < 0)
                        return ret;
@@ -320,108 +277,18 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
        return 0;
 }
 
-static int mv88e6123_61_65_port_to_phy_addr(int port)
-{
-       if (port >= 0 && port <= 4)
-               return port;
-       return -1;
-}
-
-static int
-mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6123_61_65_port_to_phy_addr(port);
-       int ret;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_read(ds, addr, regnum);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
-}
-
-static int
-mv88e6123_61_65_phy_write(struct dsa_switch *ds,
-                             int port, int regnum, u16 val)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6123_61_65_port_to_phy_addr(port);
-       int ret;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_write(ds, addr, regnum, val);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
-}
-
-static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
-       { "in_good_octets", 8, 0x00, },
-       { "in_bad_octets", 4, 0x02, },
-       { "in_unicast", 4, 0x04, },
-       { "in_broadcasts", 4, 0x06, },
-       { "in_multicasts", 4, 0x07, },
-       { "in_pause", 4, 0x16, },
-       { "in_undersize", 4, 0x18, },
-       { "in_fragments", 4, 0x19, },
-       { "in_oversize", 4, 0x1a, },
-       { "in_jabber", 4, 0x1b, },
-       { "in_rx_error", 4, 0x1c, },
-       { "in_fcs_error", 4, 0x1d, },
-       { "out_octets", 8, 0x0e, },
-       { "out_unicast", 4, 0x10, },
-       { "out_broadcasts", 4, 0x13, },
-       { "out_multicasts", 4, 0x12, },
-       { "out_pause", 4, 0x15, },
-       { "excessive", 4, 0x11, },
-       { "collisions", 4, 0x1e, },
-       { "deferred", 4, 0x05, },
-       { "single", 4, 0x14, },
-       { "multiple", 4, 0x17, },
-       { "out_fcs_error", 4, 0x03, },
-       { "late", 4, 0x1f, },
-       { "hist_64bytes", 4, 0x08, },
-       { "hist_65_127bytes", 4, 0x09, },
-       { "hist_128_255bytes", 4, 0x0a, },
-       { "hist_256_511bytes", 4, 0x0b, },
-       { "hist_512_1023bytes", 4, 0x0c, },
-       { "hist_1024_max_bytes", 4, 0x0d, },
-       { "sw_in_discards", 4, 0x110, },
-       { "sw_in_filtered", 2, 0x112, },
-       { "sw_out_filtered", 2, 0x113, },
-};
-
-static void
-mv88e6123_61_65_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
-{
-       mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
-                             mv88e6123_61_65_hw_stats, port, data);
-}
-
-static void
-mv88e6123_61_65_get_ethtool_stats(struct dsa_switch *ds,
-                                 int port, uint64_t *data)
-{
-       mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
-                                   mv88e6123_61_65_hw_stats, port, data);
-}
-
-static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
-{
-       return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
-}
-
 struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
        .tag_protocol           = DSA_TAG_PROTO_EDSA,
        .priv_size              = sizeof(struct mv88e6xxx_priv_state),
        .probe                  = mv88e6123_61_65_probe,
        .setup                  = mv88e6123_61_65_setup,
        .set_addr               = mv88e6xxx_set_addr_indirect,
-       .phy_read               = mv88e6123_61_65_phy_read,
-       .phy_write              = mv88e6123_61_65_phy_write,
+       .phy_read               = mv88e6xxx_phy_read,
+       .phy_write              = mv88e6xxx_phy_write,
        .poll_link              = mv88e6xxx_poll_link,
-       .get_strings            = mv88e6123_61_65_get_strings,
-       .get_ethtool_stats      = mv88e6123_61_65_get_ethtool_stats,
-       .get_sset_count         = mv88e6123_61_65_get_sset_count,
+       .get_strings            = mv88e6xxx_get_strings,
+       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
+       .get_sset_count         = mv88e6xxx_get_sset_count,
 #ifdef CONFIG_NET_DSA_HWMON
        .get_temp               = mv88e6xxx_get_temp,
 #endif
index 2540ef0142afd68f5dfb489850bbd9ad64f3fdc2..e54824fa0d959f919586c7ec07cbb678a40a031a 100644 (file)
 #include <net/dsa.h>
 #include "mv88e6xxx.h"
 
-/* Switch product IDs */
-#define ID_6085                0x04a0
-#define ID_6095                0x0950
-#define ID_6131                0x1060
-#define ID_6131_B2     0x1066
-
 static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
 {
        struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
@@ -31,56 +25,23 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
        if (bus == NULL)
                return NULL;
 
-       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
        if (ret >= 0) {
                int ret_masked = ret & 0xfff0;
 
-               if (ret_masked == ID_6085)
+               if (ret_masked == PORT_SWITCH_ID_6085)
                        return "Marvell 88E6085";
-               if (ret_masked == ID_6095)
+               if (ret_masked == PORT_SWITCH_ID_6095)
                        return "Marvell 88E6095/88E6095F";
-               if (ret == ID_6131_B2)
+               if (ret == PORT_SWITCH_ID_6131_B2)
                        return "Marvell 88E6131 (B2)";
-               if (ret_masked == ID_6131)
+               if (ret_masked == PORT_SWITCH_ID_6131)
                        return "Marvell 88E6131";
        }
 
        return NULL;
 }
 
-static int mv88e6131_switch_reset(struct dsa_switch *ds)
-{
-       int i;
-       int ret;
-       unsigned long timeout;
-
-       /* Set all ports to the disabled state. */
-       for (i = 0; i < 11; i++) {
-               ret = REG_READ(REG_PORT(i), 0x04);
-               REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
-       }
-
-       /* Wait for transmit queues to drain. */
-       usleep_range(2000, 4000);
-
-       /* Reset the switch. */
-       REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
-
-       /* Wait up to one second for reset to complete. */
-       timeout = jiffies + 1 * HZ;
-       while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
-               if ((ret & 0xc800) == 0xc800)
-                       break;
-
-               usleep_range(1000, 2000);
-       }
-       if (time_after(jiffies, timeout))
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
 static int mv88e6131_setup_global(struct dsa_switch *ds)
 {
        int ret;
@@ -174,7 +135,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
         * (100 Mb/s on 6085) full duplex.
         */
        if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-               if (ps->id == ID_6085)
+               if (ps->id == PORT_SWITCH_ID_6085)
                        REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
                else
                        REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
@@ -201,35 +162,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
                /* On 6085, unknown multicast forward is controlled
                 * here rather than in Port Control 2 register.
                 */
-               if (ps->id == ID_6085)
+               if (ps->id == PORT_SWITCH_ID_6085)
                        val |= 0x0008;
        }
        if (ds->dsa_port_mask & (1 << p))
                val |= 0x0100;
        REG_WRITE(addr, 0x04, val);
 
-       /* Port Control 1: disable trunking.  Also, if this is the
-        * CPU port, enable learn messages to be sent to this port.
-        */
-       REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
-
-       /* Port based VLAN map: give each port its own address
-        * database, allow the CPU port to talk to each of the 'real'
-        * ports, and allow each of the 'real' ports to only talk to
-        * the upstream port.
-        */
-       val = (p & 0xf) << 12;
-       if (dsa_is_cpu_port(ds, p))
-               val |= ds->phys_port_mask;
-       else
-               val |= 1 << dsa_upstream_port(ds);
-       REG_WRITE(addr, 0x06, val);
-
-       /* Default VLAN ID and priority: don't set a default VLAN
-        * ID, and set the default packet priority to zero.
-        */
-       REG_WRITE(addr, 0x07, 0x0000);
-
        /* Port Control 2: don't force a good FCS, don't use
         * VLAN-based, source address-based or destination
         * address-based priority overrides, don't let the switch
@@ -242,7 +181,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
         * If this is the upstream port for this switch, enable
         * forwarding of unknown multicast addresses.
         */
-       if (ps->id == ID_6085)
+       if (ps->id == PORT_SWITCH_ID_6085)
                /* on 6085, bits 3:0 are reserved, bit 6 control ARP
                 * mirroring, and multicast forward is handled in
                 * Port Control register.
@@ -278,7 +217,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
         */
        REG_WRITE(addr, 0x19, 0x7654);
 
-       return 0;
+       return mv88e6xxx_setup_port_common(ds, p);
 }
 
 static int mv88e6131_setup(struct dsa_switch *ds)
@@ -287,13 +226,28 @@ static int mv88e6131_setup(struct dsa_switch *ds)
        int i;
        int ret;
 
-       mutex_init(&ps->smi_mutex);
+       ret = mv88e6xxx_setup_common(ds);
+       if (ret < 0)
+               return ret;
+
        mv88e6xxx_ppu_state_init(ds);
-       mutex_init(&ps->stats_mutex);
 
-       ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6085:
+               ps->num_ports = 10;
+               break;
+       case PORT_SWITCH_ID_6095:
+               ps->num_ports = 11;
+               break;
+       case PORT_SWITCH_ID_6131:
+       case PORT_SWITCH_ID_6131_B2:
+               ps->num_ports = 8;
+               break;
+       default:
+               return -ENODEV;
+       }
 
-       ret = mv88e6131_switch_reset(ds);
+       ret = mv88e6xxx_switch_reset(ds, false);
        if (ret < 0)
                return ret;
 
@@ -303,7 +257,7 @@ static int mv88e6131_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < 11; i++) {
+       for (i = 0; i < ps->num_ports; i++) {
                ret = mv88e6131_setup_port(ds, i);
                if (ret < 0)
                        return ret;
@@ -312,17 +266,24 @@ static int mv88e6131_setup(struct dsa_switch *ds)
        return 0;
 }
 
-static int mv88e6131_port_to_phy_addr(int port)
+static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
 {
-       if (port >= 0 && port <= 11)
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       if (port >= 0 && port < ps->num_ports)
                return port;
-       return -1;
+
+       return -EINVAL;
 }
 
 static int
 mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
 {
-       int addr = mv88e6131_port_to_phy_addr(port);
+       int addr = mv88e6131_port_to_phy_addr(ds, port);
+
+       if (addr < 0)
+               return addr;
+
        return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
 }
 
@@ -330,61 +291,12 @@ static int
 mv88e6131_phy_write(struct dsa_switch *ds,
                              int port, int regnum, u16 val)
 {
-       int addr = mv88e6131_port_to_phy_addr(port);
-       return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
-}
-
-static struct mv88e6xxx_hw_stat mv88e6131_hw_stats[] = {
-       { "in_good_octets", 8, 0x00, },
-       { "in_bad_octets", 4, 0x02, },
-       { "in_unicast", 4, 0x04, },
-       { "in_broadcasts", 4, 0x06, },
-       { "in_multicasts", 4, 0x07, },
-       { "in_pause", 4, 0x16, },
-       { "in_undersize", 4, 0x18, },
-       { "in_fragments", 4, 0x19, },
-       { "in_oversize", 4, 0x1a, },
-       { "in_jabber", 4, 0x1b, },
-       { "in_rx_error", 4, 0x1c, },
-       { "in_fcs_error", 4, 0x1d, },
-       { "out_octets", 8, 0x0e, },
-       { "out_unicast", 4, 0x10, },
-       { "out_broadcasts", 4, 0x13, },
-       { "out_multicasts", 4, 0x12, },
-       { "out_pause", 4, 0x15, },
-       { "excessive", 4, 0x11, },
-       { "collisions", 4, 0x1e, },
-       { "deferred", 4, 0x05, },
-       { "single", 4, 0x14, },
-       { "multiple", 4, 0x17, },
-       { "out_fcs_error", 4, 0x03, },
-       { "late", 4, 0x1f, },
-       { "hist_64bytes", 4, 0x08, },
-       { "hist_65_127bytes", 4, 0x09, },
-       { "hist_128_255bytes", 4, 0x0a, },
-       { "hist_256_511bytes", 4, 0x0b, },
-       { "hist_512_1023bytes", 4, 0x0c, },
-       { "hist_1024_max_bytes", 4, 0x0d, },
-};
-
-static void
-mv88e6131_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
-{
-       mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6131_hw_stats),
-                             mv88e6131_hw_stats, port, data);
-}
+       int addr = mv88e6131_port_to_phy_addr(ds, port);
 
-static void
-mv88e6131_get_ethtool_stats(struct dsa_switch *ds,
-                                 int port, uint64_t *data)
-{
-       mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6131_hw_stats),
-                                   mv88e6131_hw_stats, port, data);
-}
+       if (addr < 0)
+               return addr;
 
-static int mv88e6131_get_sset_count(struct dsa_switch *ds)
-{
-       return ARRAY_SIZE(mv88e6131_hw_stats);
+       return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
 }
 
 struct dsa_switch_driver mv88e6131_switch_driver = {
@@ -396,9 +308,9 @@ struct dsa_switch_driver mv88e6131_switch_driver = {
        .phy_read               = mv88e6131_phy_read,
        .phy_write              = mv88e6131_phy_write,
        .poll_link              = mv88e6xxx_poll_link,
-       .get_strings            = mv88e6131_get_strings,
-       .get_ethtool_stats      = mv88e6131_get_ethtool_stats,
-       .get_sset_count         = mv88e6131_get_sset_count,
+       .get_strings            = mv88e6xxx_get_strings,
+       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
+       .get_sset_count         = mv88e6xxx_get_sset_count,
 };
 
 MODULE_ALIAS("platform:mv88e6085");
index aa33d16f2e22ec6b48f6332db928c9899ce82817..9104efea0e3e8289803c53348e79eded3d80e50c 100644 (file)
@@ -25,69 +25,27 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
        if (bus == NULL)
                return NULL;
 
-       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
        if (ret >= 0) {
-               if ((ret & 0xfff0) == 0x1710)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6171)
                        return "Marvell 88E6171";
-               if ((ret & 0xfff0) == 0x1720)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
                        return "Marvell 88E6172";
        }
 
        return NULL;
 }
 
-static int mv88e6171_switch_reset(struct dsa_switch *ds)
-{
-       int i;
-       int ret;
-       unsigned long timeout;
-
-       /* Set all ports to the disabled state. */
-       for (i = 0; i < 8; i++) {
-               ret = REG_READ(REG_PORT(i), 0x04);
-               REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
-       }
-
-       /* Wait for transmit queues to drain. */
-       usleep_range(2000, 4000);
-
-       /* Reset the switch. */
-       REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
-
-       /* Wait up to one second for reset to complete. */
-       timeout = jiffies + 1 * HZ;
-       while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
-               if ((ret & 0xc800) == 0xc800)
-                       break;
-
-               usleep_range(1000, 2000);
-       }
-       if (time_after(jiffies, timeout))
-               return -ETIMEDOUT;
-
-       /* Enable ports not under DSA, e.g. WAN port */
-       for (i = 0; i < 8; i++) {
-               if (dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i))
-                       continue;
-
-               ret = REG_READ(REG_PORT(i), 0x04);
-               REG_WRITE(REG_PORT(i), 0x04, ret | 0x03);
-       }
-
-       return 0;
-}
-
 static int mv88e6171_setup_global(struct dsa_switch *ds)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
        int i;
 
-       /* Disable the PHY polling unit (since there won't be any
-        * external PHYs to poll), don't discard packets with
-        * excessive collisions, and mask all interrupt sources.
+       /* Discard packets with excessive collisions, mask all
+        * interrupt sources, enable PPU.
         */
-       REG_WRITE(REG_GLOBAL, 0x04, 0x0000);
+       REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
 
        /* Set the default address aging time to 5 minutes, and
         * enable address learn messages to be sent to all message
@@ -145,7 +103,7 @@ static int mv88e6171_setup_global(struct dsa_switch *ds)
        }
 
        /* Clear all trunk masks. */
-       for (i = 0; i < 8; i++)
+       for (i = 0; i < ps->num_ports; i++)
                REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
 
        /* Clear all trunk mappings. */
@@ -219,28 +177,6 @@ static int mv88e6171_setup_port(struct dsa_switch *ds, int p)
                val |= 0x000c;
        REG_WRITE(addr, 0x04, val);
 
-       /* Port Control 1: disable trunking.  Also, if this is the
-        * CPU port, enable learn messages to be sent to this port.
-        */
-       REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
-
-       /* Port based VLAN map: give each port its own address
-        * database, allow the CPU port to talk to each of the 'real'
-        * ports, and allow each of the 'real' ports to only talk to
-        * the upstream port.
-        */
-       val = (p & 0xf) << 12;
-       if (dsa_is_cpu_port(ds, p))
-               val |= ds->phys_port_mask;
-       else
-               val |= 1 << dsa_upstream_port(ds);
-       REG_WRITE(addr, 0x06, val);
-
-       /* Default VLAN ID and priority: don't set a default VLAN
-        * ID, and set the default packet priority to zero.
-        */
-       REG_WRITE(addr, 0x07, 0x0000);
-
        /* Port Control 2: don't force a good FCS, set the maximum
         * frame size to 10240 bytes, don't let the switch add or
         * strip 802.1q tags, don't discard tagged or untagged frames
@@ -285,19 +221,22 @@ static int mv88e6171_setup_port(struct dsa_switch *ds, int p)
         */
        REG_WRITE(addr, 0x19, 0x7654);
 
-       return 0;
+       return mv88e6xxx_setup_port_common(ds, p);
 }
 
 static int mv88e6171_setup(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int i;
        int ret;
 
-       mutex_init(&ps->smi_mutex);
-       mutex_init(&ps->stats_mutex);
+       ret = mv88e6xxx_setup_common(ds);
+       if (ret < 0)
+               return ret;
+
+       ps->num_ports = 7;
 
-       ret = mv88e6171_switch_reset(ds);
+       ret = mv88e6xxx_switch_reset(ds, true);
        if (ret < 0)
                return ret;
 
@@ -307,7 +246,7 @@ static int mv88e6171_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < 8; i++) {
+       for (i = 0; i < ps->num_ports; i++) {
                if (!(dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i)))
                        continue;
 
@@ -316,96 +255,29 @@ static int mv88e6171_setup(struct dsa_switch *ds)
                        return ret;
        }
 
-       mutex_init(&ps->phy_mutex);
-
        return 0;
 }
 
-static int mv88e6171_port_to_phy_addr(int port)
-{
-       if (port >= 0 && port <= 4)
-               return port;
-       return -1;
-}
-
-static int
-mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum)
+static int mv88e6171_get_eee(struct dsa_switch *ds, int port,
+                            struct ethtool_eee *e)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6171_port_to_phy_addr(port);
-       int ret;
 
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_read(ds, addr, regnum);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
-}
-
-static int
-mv88e6171_phy_write(struct dsa_switch *ds,
-                   int port, int regnum, u16 val)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6171_port_to_phy_addr(port);
-       int ret;
+       if (ps->id == PORT_SWITCH_ID_6172)
+               return mv88e6xxx_get_eee(ds, port, e);
 
-       mutex_lock(&ps->phy_mutex);
-       ret = mv88e6xxx_phy_write(ds, addr, regnum, val);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
+       return -EOPNOTSUPP;
 }
 
-static struct mv88e6xxx_hw_stat mv88e6171_hw_stats[] = {
-       { "in_good_octets", 8, 0x00, },
-       { "in_bad_octets", 4, 0x02, },
-       { "in_unicast", 4, 0x04, },
-       { "in_broadcasts", 4, 0x06, },
-       { "in_multicasts", 4, 0x07, },
-       { "in_pause", 4, 0x16, },
-       { "in_undersize", 4, 0x18, },
-       { "in_fragments", 4, 0x19, },
-       { "in_oversize", 4, 0x1a, },
-       { "in_jabber", 4, 0x1b, },
-       { "in_rx_error", 4, 0x1c, },
-       { "in_fcs_error", 4, 0x1d, },
-       { "out_octets", 8, 0x0e, },
-       { "out_unicast", 4, 0x10, },
-       { "out_broadcasts", 4, 0x13, },
-       { "out_multicasts", 4, 0x12, },
-       { "out_pause", 4, 0x15, },
-       { "excessive", 4, 0x11, },
-       { "collisions", 4, 0x1e, },
-       { "deferred", 4, 0x05, },
-       { "single", 4, 0x14, },
-       { "multiple", 4, 0x17, },
-       { "out_fcs_error", 4, 0x03, },
-       { "late", 4, 0x1f, },
-       { "hist_64bytes", 4, 0x08, },
-       { "hist_65_127bytes", 4, 0x09, },
-       { "hist_128_255bytes", 4, 0x0a, },
-       { "hist_256_511bytes", 4, 0x0b, },
-       { "hist_512_1023bytes", 4, 0x0c, },
-       { "hist_1024_max_bytes", 4, 0x0d, },
-};
-
-static void
-mv88e6171_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+static int mv88e6171_set_eee(struct dsa_switch *ds, int port,
+                            struct phy_device *phydev, struct ethtool_eee *e)
 {
-       mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6171_hw_stats),
-                             mv88e6171_hw_stats, port, data);
-}
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
-static void
-mv88e6171_get_ethtool_stats(struct dsa_switch *ds,
-                           int port, uint64_t *data)
-{
-       mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6171_hw_stats),
-                                   mv88e6171_hw_stats, port, data);
-}
+       if (ps->id == PORT_SWITCH_ID_6172)
+               return mv88e6xxx_set_eee(ds, port, phydev, e);
 
-static int mv88e6171_get_sset_count(struct dsa_switch *ds)
-{
-       return ARRAY_SIZE(mv88e6171_hw_stats);
+       return -EOPNOTSUPP;
 }
 
 struct dsa_switch_driver mv88e6171_switch_driver = {
@@ -414,17 +286,25 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
        .probe                  = mv88e6171_probe,
        .setup                  = mv88e6171_setup,
        .set_addr               = mv88e6xxx_set_addr_indirect,
-       .phy_read               = mv88e6171_phy_read,
-       .phy_write              = mv88e6171_phy_write,
+       .phy_read               = mv88e6xxx_phy_read_indirect,
+       .phy_write              = mv88e6xxx_phy_write_indirect,
        .poll_link              = mv88e6xxx_poll_link,
-       .get_strings            = mv88e6171_get_strings,
-       .get_ethtool_stats      = mv88e6171_get_ethtool_stats,
-       .get_sset_count         = mv88e6171_get_sset_count,
+       .get_strings            = mv88e6xxx_get_strings,
+       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
+       .get_sset_count         = mv88e6xxx_get_sset_count,
+       .set_eee                = mv88e6171_set_eee,
+       .get_eee                = mv88e6171_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
        .get_temp               = mv88e6xxx_get_temp,
 #endif
        .get_regs_len           = mv88e6xxx_get_regs_len,
        .get_regs               = mv88e6xxx_get_regs,
+       .port_join_bridge       = mv88e6xxx_join_bridge,
+       .port_leave_bridge      = mv88e6xxx_leave_bridge,
+       .port_stp_update        = mv88e6xxx_port_stp_update,
+       .fdb_add                = mv88e6xxx_port_fdb_add,
+       .fdb_del                = mv88e6xxx_port_fdb_del,
+       .fdb_getnext            = mv88e6xxx_port_fdb_getnext,
 };
 
 MODULE_ALIAS("platform:mv88e6171");
index e13adc7b3ddaf4cc5742d99b68340f152ba50184..126c11b81e756ec232106de72583904638ccb024 100644 (file)
 #include <net/dsa.h>
 #include "mv88e6xxx.h"
 
-static int mv88e6352_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
-{
-       unsigned long timeout = jiffies + HZ / 10;
-
-       while (time_before(jiffies, timeout)) {
-               int ret;
-
-               ret = REG_READ(reg, offset);
-               if (!(ret & mask))
-                       return 0;
-
-               usleep_range(1000, 2000);
-       }
-       return -ETIMEDOUT;
-}
-
-static inline int mv88e6352_phy_wait(struct dsa_switch *ds)
-{
-       return mv88e6352_wait(ds, REG_GLOBAL2, 0x18, 0x8000);
-}
-
-static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds)
-{
-       return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x0800);
-}
-
-static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds)
-{
-       return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x8000);
-}
-
-static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum)
-{
-       int ret;
-
-       REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum);
-
-       ret = mv88e6352_phy_wait(ds);
-       if (ret < 0)
-               return ret;
-
-       return REG_READ(REG_GLOBAL2, 0x19);
-}
-
-static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum,
-                                u16 val)
-{
-       REG_WRITE(REG_GLOBAL2, 0x19, val);
-       REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum);
-
-       return mv88e6352_phy_wait(ds);
-}
-
 static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
 {
        struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
@@ -83,58 +30,24 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
        if (bus == NULL)
                return NULL;
 
-       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+       ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
        if (ret >= 0) {
-               if ((ret & 0xfff0) == 0x1760)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
                        return "Marvell 88E6176";
-               if (ret == 0x3521)
+               if (ret == PORT_SWITCH_ID_6352_A0)
                        return "Marvell 88E6352 (A0)";
-               if (ret == 0x3522)
+               if (ret == PORT_SWITCH_ID_6352_A1)
                        return "Marvell 88E6352 (A1)";
-               if ((ret & 0xfff0) == 0x3520)
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6352)
                        return "Marvell 88E6352";
        }
 
        return NULL;
 }
 
-static int mv88e6352_switch_reset(struct dsa_switch *ds)
-{
-       unsigned long timeout;
-       int ret;
-       int i;
-
-       /* Set all ports to the disabled state. */
-       for (i = 0; i < 7; i++) {
-               ret = REG_READ(REG_PORT(i), 0x04);
-               REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
-       }
-
-       /* Wait for transmit queues to drain. */
-       usleep_range(2000, 4000);
-
-       /* Reset the switch. Keep PPU active (bit 14, undocumented).
-        * The PPU needs to be active to support indirect phy register
-        * accesses through global registers 0x18 and 0x19.
-        */
-       REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
-
-       /* Wait up to one second for reset to complete. */
-       timeout = jiffies + 1 * HZ;
-       while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
-               if ((ret & 0x8800) == 0x8800)
-                       break;
-               usleep_range(1000, 2000);
-       }
-       if (time_after(jiffies, timeout))
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
 static int mv88e6352_setup_global(struct dsa_switch *ds)
 {
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
        int i;
 
@@ -205,7 +118,7 @@ static int mv88e6352_setup_global(struct dsa_switch *ds)
        /* Disable ingress rate limiting by resetting all ingress
         * rate limit registers to their initial state.
         */
-       for (i = 0; i < 7; i++)
+       for (i = 0; i < ps->num_ports; i++)
                REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
 
        /* Initialise cross-chip port VLAN table to reset defaults. */
@@ -268,28 +181,6 @@ static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
                val |= 0x000c;
        REG_WRITE(addr, 0x04, val);
 
-       /* Port Control 1: disable trunking.  Also, if this is the
-        * CPU port, enable learn messages to be sent to this port.
-        */
-       REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
-
-       /* Port based VLAN map: give each port its own address
-        * database, allow the CPU port to talk to each of the 'real'
-        * ports, and allow each of the 'real' ports to only talk to
-        * the upstream port.
-        */
-       val = (p & 0xf) << 12;
-       if (dsa_is_cpu_port(ds, p))
-               val |= ds->phys_port_mask;
-       else
-               val |= 1 << dsa_upstream_port(ds);
-       REG_WRITE(addr, 0x06, val);
-
-       /* Default VLAN ID and priority: don't set a default VLAN
-        * ID, and set the default packet priority to zero.
-        */
-       REG_WRITE(addr, 0x07, 0x0000);
-
        /* Port Control 2: don't force a good FCS, set the maximum
         * frame size to 10240 bytes, don't let the switch add or
         * strip 802.1q tags, don't discard tagged or untagged frames
@@ -334,53 +225,18 @@ static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
         */
        REG_WRITE(addr, 0x19, 0x7654);
 
-       return 0;
+       return mv88e6xxx_setup_port_common(ds, p);
 }
 
 #ifdef CONFIG_NET_DSA_HWMON
 
-static int mv88e6352_phy_page_read(struct dsa_switch *ds,
-                                  int port, int page, int reg)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = __mv88e6352_phy_write(ds, port, 0x16, page);
-       if (ret < 0)
-               goto error;
-       ret = __mv88e6352_phy_read(ds, port, reg);
-error:
-       __mv88e6352_phy_write(ds, port, 0x16, 0x0);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
-}
-
-static int mv88e6352_phy_page_write(struct dsa_switch *ds,
-                                   int port, int page, int reg, int val)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = __mv88e6352_phy_write(ds, port, 0x16, page);
-       if (ret < 0)
-               goto error;
-
-       ret = __mv88e6352_phy_write(ds, port, reg, val);
-error:
-       __mv88e6352_phy_write(ds, port, 0x16, 0x0);
-       mutex_unlock(&ps->phy_mutex);
-       return ret;
-}
-
 static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
 {
        int ret;
 
        *temp = 0;
 
-       ret = mv88e6352_phy_page_read(ds, 0, 6, 27);
+       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27);
        if (ret < 0)
                return ret;
 
@@ -395,7 +251,7 @@ static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
 
        *temp = 0;
 
-       ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
        if (ret < 0)
                return ret;
 
@@ -408,11 +264,11 @@ static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
 {
        int ret;
 
-       ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
        if (ret < 0)
                return ret;
        temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
-       return mv88e6352_phy_page_write(ds, 0, 6, 26,
+       return mv88e6xxx_phy_page_write(ds, 0, 6, 26,
                                        (ret & 0xe0ff) | (temp << 8));
 }
 
@@ -422,7 +278,7 @@ static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
 
        *alarm = false;
 
-       ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
        if (ret < 0)
                return ret;
 
@@ -438,14 +294,15 @@ static int mv88e6352_setup(struct dsa_switch *ds)
        int ret;
        int i;
 
-       mutex_init(&ps->smi_mutex);
-       mutex_init(&ps->stats_mutex);
-       mutex_init(&ps->phy_mutex);
-       mutex_init(&ps->eeprom_mutex);
+       ret = mv88e6xxx_setup_common(ds);
+       if (ret < 0)
+               return ret;
 
-       ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+       ps->num_ports = 7;
 
-       ret = mv88e6352_switch_reset(ds);
+       mutex_init(&ps->eeprom_mutex);
+
+       ret = mv88e6xxx_switch_reset(ds, true);
        if (ret < 0)
                return ret;
 
@@ -455,7 +312,7 @@ static int mv88e6352_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < 7; i++) {
+       for (i = 0; i < ps->num_ports; i++) {
                ret = mv88e6352_setup_port(ds, i);
                if (ret < 0)
                        return ret;
@@ -464,83 +321,6 @@ static int mv88e6352_setup(struct dsa_switch *ds)
        return 0;
 }
 
-static int mv88e6352_port_to_phy_addr(int port)
-{
-       if (port >= 0 && port <= 4)
-               return port;
-       return -EINVAL;
-}
-
-static int
-mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6352_port_to_phy_addr(port);
-       int ret;
-
-       if (addr < 0)
-               return addr;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = __mv88e6352_phy_read(ds, addr, regnum);
-       mutex_unlock(&ps->phy_mutex);
-
-       return ret;
-}
-
-static int
-mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = mv88e6352_port_to_phy_addr(port);
-       int ret;
-
-       if (addr < 0)
-               return addr;
-
-       mutex_lock(&ps->phy_mutex);
-       ret = __mv88e6352_phy_write(ds, addr, regnum, val);
-       mutex_unlock(&ps->phy_mutex);
-
-       return ret;
-}
-
-static struct mv88e6xxx_hw_stat mv88e6352_hw_stats[] = {
-       { "in_good_octets", 8, 0x00, },
-       { "in_bad_octets", 4, 0x02, },
-       { "in_unicast", 4, 0x04, },
-       { "in_broadcasts", 4, 0x06, },
-       { "in_multicasts", 4, 0x07, },
-       { "in_pause", 4, 0x16, },
-       { "in_undersize", 4, 0x18, },
-       { "in_fragments", 4, 0x19, },
-       { "in_oversize", 4, 0x1a, },
-       { "in_jabber", 4, 0x1b, },
-       { "in_rx_error", 4, 0x1c, },
-       { "in_fcs_error", 4, 0x1d, },
-       { "out_octets", 8, 0x0e, },
-       { "out_unicast", 4, 0x10, },
-       { "out_broadcasts", 4, 0x13, },
-       { "out_multicasts", 4, 0x12, },
-       { "out_pause", 4, 0x15, },
-       { "excessive", 4, 0x11, },
-       { "collisions", 4, 0x1e, },
-       { "deferred", 4, 0x05, },
-       { "single", 4, 0x14, },
-       { "multiple", 4, 0x17, },
-       { "out_fcs_error", 4, 0x03, },
-       { "late", 4, 0x1f, },
-       { "hist_64bytes", 4, 0x08, },
-       { "hist_65_127bytes", 4, 0x09, },
-       { "hist_128_255bytes", 4, 0x0a, },
-       { "hist_256_511bytes", 4, 0x0b, },
-       { "hist_512_1023bytes", 4, 0x0c, },
-       { "hist_1024_max_bytes", 4, 0x0d, },
-       { "sw_in_discards", 4, 0x110, },
-       { "sw_in_filtered", 2, 0x112, },
-       { "sw_out_filtered", 2, 0x113, },
-};
-
 static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -553,7 +333,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
        if (ret < 0)
                goto error;
 
-       ret = mv88e6352_eeprom_busy_wait(ds);
+       ret = mv88e6xxx_eeprom_busy_wait(ds);
        if (ret < 0)
                goto error;
 
@@ -576,7 +356,7 @@ static int mv88e6352_get_eeprom(struct dsa_switch *ds,
 
        eeprom->magic = 0xc3ec4951;
 
-       ret = mv88e6352_eeprom_load_wait(ds);
+       ret = mv88e6xxx_eeprom_load_wait(ds);
        if (ret < 0)
                return ret;
 
@@ -657,7 +437,7 @@ static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
        if (ret < 0)
                goto error;
 
-       ret = mv88e6352_eeprom_busy_wait(ds);
+       ret = mv88e6xxx_eeprom_busy_wait(ds);
 error:
        mutex_unlock(&ps->eeprom_mutex);
        return ret;
@@ -681,7 +461,7 @@ static int mv88e6352_set_eeprom(struct dsa_switch *ds,
        len = eeprom->len;
        eeprom->len = 0;
 
-       ret = mv88e6352_eeprom_load_wait(ds);
+       ret = mv88e6xxx_eeprom_load_wait(ds);
        if (ret < 0)
                return ret;
 
@@ -739,37 +519,20 @@ static int mv88e6352_set_eeprom(struct dsa_switch *ds,
        return 0;
 }
 
-static void
-mv88e6352_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
-{
-       mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6352_hw_stats),
-                             mv88e6352_hw_stats, port, data);
-}
-
-static void
-mv88e6352_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
-{
-       mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6352_hw_stats),
-                                   mv88e6352_hw_stats, port, data);
-}
-
-static int mv88e6352_get_sset_count(struct dsa_switch *ds)
-{
-       return ARRAY_SIZE(mv88e6352_hw_stats);
-}
-
 struct dsa_switch_driver mv88e6352_switch_driver = {
        .tag_protocol           = DSA_TAG_PROTO_EDSA,
        .priv_size              = sizeof(struct mv88e6xxx_priv_state),
        .probe                  = mv88e6352_probe,
        .setup                  = mv88e6352_setup,
        .set_addr               = mv88e6xxx_set_addr_indirect,
-       .phy_read               = mv88e6352_phy_read,
-       .phy_write              = mv88e6352_phy_write,
+       .phy_read               = mv88e6xxx_phy_read_indirect,
+       .phy_write              = mv88e6xxx_phy_write_indirect,
        .poll_link              = mv88e6xxx_poll_link,
-       .get_strings            = mv88e6352_get_strings,
-       .get_ethtool_stats      = mv88e6352_get_ethtool_stats,
-       .get_sset_count         = mv88e6352_get_sset_count,
+       .get_strings            = mv88e6xxx_get_strings,
+       .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
+       .get_sset_count         = mv88e6xxx_get_sset_count,
+       .set_eee                = mv88e6xxx_set_eee,
+       .get_eee                = mv88e6xxx_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
        .get_temp               = mv88e6352_get_temp,
        .get_temp_limit         = mv88e6352_get_temp_limit,
@@ -780,6 +543,12 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
        .set_eeprom             = mv88e6352_set_eeprom,
        .get_regs_len           = mv88e6xxx_get_regs_len,
        .get_regs               = mv88e6xxx_get_regs,
+       .port_join_bridge       = mv88e6xxx_join_bridge,
+       .port_leave_bridge      = mv88e6xxx_leave_bridge,
+       .port_stp_update        = mv88e6xxx_port_stp_update,
+       .fdb_add                = mv88e6xxx_port_fdb_add,
+       .fdb_del                = mv88e6xxx_port_fdb_del,
+       .fdb_getnext            = mv88e6xxx_port_fdb_getnext,
 };
 
 MODULE_ALIAS("platform:mv88e6352");
index 3e7e31a6abb73f28ea44c9314c4f3628337edab7..fc8d3b6ffe8e0b35e64ed2aaa78b342a1b86f66a 100644 (file)
@@ -9,6 +9,8 @@
  */
 
 #include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
 #include <linux/jiffies.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -31,11 +33,11 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
        int i;
 
        for (i = 0; i < 16; i++) {
-               ret = mdiobus_read(bus, sw_addr, 0);
+               ret = mdiobus_read(bus, sw_addr, SMI_CMD);
                if (ret < 0)
                        return ret;
 
-               if ((ret & 0x8000) == 0)
+               if ((ret & SMI_CMD_BUSY) == 0)
                        return 0;
        }
 
@@ -55,7 +57,8 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
                return ret;
 
        /* Transmit the read command. */
-       ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg);
+       ret = mdiobus_write(bus, sw_addr, SMI_CMD,
+                           SMI_CMD_OP_22_READ | (addr << 5) | reg);
        if (ret < 0)
                return ret;
 
@@ -65,26 +68,23 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
                return ret;
 
        /* Read the data. */
-       ret = mdiobus_read(bus, sw_addr, 1);
+       ret = mdiobus_read(bus, sw_addr, SMI_DATA);
        if (ret < 0)
                return ret;
 
        return ret & 0xffff;
 }
 
-int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
        int ret;
 
        if (bus == NULL)
                return -EINVAL;
 
-       mutex_lock(&ps->smi_mutex);
        ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
-       mutex_unlock(&ps->smi_mutex);
-
        if (ret < 0)
                return ret;
 
@@ -94,6 +94,18 @@ int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
        return ret;
 }
 
+int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
+
+       mutex_lock(&ps->smi_mutex);
+       ret = _mv88e6xxx_reg_read(ds, addr, reg);
+       mutex_unlock(&ps->smi_mutex);
+
+       return ret;
+}
+
 int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
                          int reg, u16 val)
 {
@@ -108,12 +120,13 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
                return ret;
 
        /* Transmit the data to write. */
-       ret = mdiobus_write(bus, sw_addr, 1, val);
+       ret = mdiobus_write(bus, sw_addr, SMI_DATA, val);
        if (ret < 0)
                return ret;
 
        /* Transmit the write command. */
-       ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg);
+       ret = mdiobus_write(bus, sw_addr, SMI_CMD,
+                           SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
        if (ret < 0)
                return ret;
 
@@ -125,11 +138,11 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
        return 0;
 }
 
-int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
+                               u16 val)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
-       int ret;
 
        if (bus == NULL)
                return -EINVAL;
@@ -137,8 +150,16 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
        dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
                addr, reg, val);
 
+       return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
+}
+
+int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
+
        mutex_lock(&ps->smi_mutex);
-       ret = __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
+       ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
@@ -147,26 +168,26 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
 int mv88e6xxx_config_prio(struct dsa_switch *ds)
 {
        /* Configure the IP ToS mapping registers. */
-       REG_WRITE(REG_GLOBAL, 0x10, 0x0000);
-       REG_WRITE(REG_GLOBAL, 0x11, 0x0000);
-       REG_WRITE(REG_GLOBAL, 0x12, 0x5555);
-       REG_WRITE(REG_GLOBAL, 0x13, 0x5555);
-       REG_WRITE(REG_GLOBAL, 0x14, 0xaaaa);
-       REG_WRITE(REG_GLOBAL, 0x15, 0xaaaa);
-       REG_WRITE(REG_GLOBAL, 0x16, 0xffff);
-       REG_WRITE(REG_GLOBAL, 0x17, 0xffff);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
 
        /* Configure the IEEE 802.1p priority mapping register. */
-       REG_WRITE(REG_GLOBAL, 0x18, 0xfa41);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
 
        return 0;
 }
 
 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
 {
-       REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
-       REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
-       REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
+       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
+       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
 
        return 0;
 }
@@ -180,12 +201,13 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
                int j;
 
                /* Write the MAC address byte. */
-               REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]);
+               REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
+                         GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
 
                /* Wait for the write to complete. */
                for (j = 0; j < 16; j++) {
-                       ret = REG_READ(REG_GLOBAL2, 0x0d);
-                       if ((ret & 0x8000) == 0)
+                       ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
+                       if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
                                break;
                }
                if (j == 16)
@@ -195,14 +217,17 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
        return 0;
 }
 
-int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
+/* Must be called with phy mutex held */
+static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
 {
        if (addr >= 0)
                return mv88e6xxx_reg_read(ds, addr, regnum);
        return 0xffff;
 }
 
-int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val)
+/* Must be called with phy mutex held */
+static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
+                               u16 val)
 {
        if (addr >= 0)
                return mv88e6xxx_reg_write(ds, addr, regnum, val);
@@ -215,14 +240,16 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
        int ret;
        unsigned long timeout;
 
-       ret = REG_READ(REG_GLOBAL, 0x04);
-       REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000);
+       ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
+       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+                 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
 
        timeout = jiffies + 1 * HZ;
        while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
+               ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
                usleep_range(1000, 2000);
-               if ((ret & 0xc000) != 0xc000)
+               if ((ret & GLOBAL_STATUS_PPU_MASK) !=
+                   GLOBAL_STATUS_PPU_POLLING)
                        return 0;
        }
 
@@ -234,14 +261,15 @@ static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
        int ret;
        unsigned long timeout;
 
-       ret = REG_READ(REG_GLOBAL, 0x04);
-       REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000);
+       ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
+       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
 
        timeout = jiffies + 1 * HZ;
        while (time_before(jiffies, timeout)) {
-               ret = REG_READ(REG_GLOBAL, 0x00);
+               ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
                usleep_range(1000, 2000);
-               if ((ret & 0xc000) == 0xc000)
+               if ((ret & GLOBAL_STATUS_PPU_MASK) ==
+                   GLOBAL_STATUS_PPU_POLLING)
                        return 0;
        }
 
@@ -362,11 +390,12 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
 
                link = 0;
                if (dev->flags & IFF_UP) {
-                       port_status = mv88e6xxx_reg_read(ds, REG_PORT(i), 0x00);
+                       port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
+                                                        PORT_STATUS);
                        if (port_status < 0)
                                continue;
 
-                       link = !!(port_status & 0x0800);
+                       link = !!(port_status & PORT_STATUS_LINK);
                }
 
                if (!link) {
@@ -377,22 +406,22 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
                        continue;
                }
 
-               switch (port_status & 0x0300) {
-               case 0x0000:
+               switch (port_status & PORT_STATUS_SPEED_MASK) {
+               case PORT_STATUS_SPEED_10:
                        speed = 10;
                        break;
-               case 0x0100:
+               case PORT_STATUS_SPEED_100:
                        speed = 100;
                        break;
-               case 0x0200:
+               case PORT_STATUS_SPEED_1000:
                        speed = 1000;
                        break;
                default:
                        speed = -1;
                        break;
                }
-               duplex = (port_status & 0x0400) ? 1 : 0;
-               fc = (port_status & 0x8000) ? 1 : 0;
+               duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
+               fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;
 
                if (!netif_carrier_ok(dev)) {
                        netdev_info(dev,
@@ -405,14 +434,27 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
        }
 }
 
+static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6352:
+       case PORT_SWITCH_ID_6172:
+       case PORT_SWITCH_ID_6176:
+               return true;
+       }
+       return false;
+}
+
 static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
 {
        int ret;
        int i;
 
        for (i = 0; i < 10; i++) {
-               ret = REG_READ(REG_GLOBAL, 0x1d);
-               if ((ret & 0x8000) == 0)
+               ret = REG_READ(REG_GLOBAL, GLOBAL_STATS_OP);
+               if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
                        return 0;
        }
 
@@ -423,8 +465,13 @@ static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
 {
        int ret;
 
+       if (mv88e6xxx_6352_family(ds))
+               port = (port + 1) << 5;
+
        /* Snapshot the hardware statistics counters for this port. */
-       REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port);
+       REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP,
+                 GLOBAL_STATS_OP_CAPTURE_PORT |
+                 GLOBAL_STATS_OP_HIST_RX_TX | port);
 
        /* Wait for the snapshotting to complete. */
        ret = mv88e6xxx_stats_wait(ds);
@@ -441,7 +488,9 @@ static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
 
        *val = 0;
 
-       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1d, 0xcc00 | stat);
+       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+                                 GLOBAL_STATS_OP_READ_CAPTURED |
+                                 GLOBAL_STATS_OP_HIST_RX_TX | stat);
        if (ret < 0)
                return;
 
@@ -449,22 +498,77 @@ static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
        if (ret < 0)
                return;
 
-       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1e);
+       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
        if (ret < 0)
                return;
 
        _val = ret << 16;
 
-       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1f);
+       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
        if (ret < 0)
                return;
 
        *val = _val | ret;
 }
 
-void mv88e6xxx_get_strings(struct dsa_switch *ds,
-                          int nr_stats, struct mv88e6xxx_hw_stat *stats,
-                          int port, uint8_t *data)
+/* Hardware statistics counters in ethtool reporting order.
+ * Each entry is { name, counter width in bytes, register offset }.
+ * NOTE(review): callers use ARRAY_SIZE() - 3 on devices without the
+ * optional counters, so the three entries after the marker comment
+ * below must remain the LAST three in this table.
+ */
+static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
+       { "in_good_octets", 8, 0x00, },
+       { "in_bad_octets", 4, 0x02, },
+       { "in_unicast", 4, 0x04, },
+       { "in_broadcasts", 4, 0x06, },
+       { "in_multicasts", 4, 0x07, },
+       { "in_pause", 4, 0x16, },
+       { "in_undersize", 4, 0x18, },
+       { "in_fragments", 4, 0x19, },
+       { "in_oversize", 4, 0x1a, },
+       { "in_jabber", 4, 0x1b, },
+       { "in_rx_error", 4, 0x1c, },
+       { "in_fcs_error", 4, 0x1d, },
+       { "out_octets", 8, 0x0e, },
+       { "out_unicast", 4, 0x10, },
+       { "out_broadcasts", 4, 0x13, },
+       { "out_multicasts", 4, 0x12, },
+       { "out_pause", 4, 0x15, },
+       { "excessive", 4, 0x11, },
+       { "collisions", 4, 0x1e, },
+       { "deferred", 4, 0x05, },
+       { "single", 4, 0x14, },
+       { "multiple", 4, 0x17, },
+       { "out_fcs_error", 4, 0x03, },
+       { "late", 4, 0x1f, },
+       { "hist_64bytes", 4, 0x08, },
+       { "hist_65_127bytes", 4, 0x09, },
+       { "hist_128_255bytes", 4, 0x0a, },
+       { "hist_256_511bytes", 4, 0x0b, },
+       { "hist_512_1023bytes", 4, 0x0c, },
+       { "hist_1024_max_bytes", 4, 0x0d, },
+       /* Not all devices have the following counters */
+       { "sw_in_discards", 4, 0x110, },
+       { "sw_in_filtered", 2, 0x112, },
+       { "sw_out_filtered", 2, 0x113, },
+
+};
+
+/* Some devices lack the per-port discard/filter counters at the end
+ * of mv88e6xxx_hw_stats[]; report whether this one has them.
+ */
+static bool have_sw_in_discards(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6095:
+       case PORT_SWITCH_ID_6161:
+       case PORT_SWITCH_ID_6165:
+       case PORT_SWITCH_ID_6171:
+       case PORT_SWITCH_ID_6172:
+       case PORT_SWITCH_ID_6176:
+       case PORT_SWITCH_ID_6182:
+       case PORT_SWITCH_ID_6185:
+       case PORT_SWITCH_ID_6352:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
+                                  int nr_stats,
+                                  struct mv88e6xxx_hw_stat *stats,
+                                  int port, uint8_t *data)
 {
        int i;
 
@@ -474,9 +578,10 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds,
        }
 }
 
-void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
-                                int nr_stats, struct mv88e6xxx_hw_stat *stats,
-                                int port, uint64_t *data)
+static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+                                        int nr_stats,
+                                        struct mv88e6xxx_hw_stat *stats,
+                                        int port, uint64_t *data)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
@@ -524,6 +629,39 @@ error:
        mutex_unlock(&ps->stats_mutex);
 }
 
+/* Report the names of all statistics this device implements.
+ *
+ * The number of entries must match mv88e6xxx_get_sset_count()
+ * exactly, so derive the count from it instead of duplicating the
+ * "- 3 optional counters" rule here.
+ */
+void
+mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+       _mv88e6xxx_get_strings(ds, mv88e6xxx_get_sset_count(ds),
+                              mv88e6xxx_hw_stats, port, data);
+}
+
+/* Number of ethtool statistics this device exposes. */
+int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
+{
+       int count = ARRAY_SIZE(mv88e6xxx_hw_stats);
+
+       /* The last three table entries exist only on some devices. */
+       if (!have_sw_in_discards(ds))
+               count -= 3;
+
+       return count;
+}
+
+/* Read all implemented hardware counters for @port into @data.
+ *
+ * Keep the entry count in lockstep with mv88e6xxx_get_sset_count()
+ * and mv88e6xxx_get_strings() by computing it in one place rather
+ * than repeating the "- 3" magic constant.
+ */
+void
+mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+                           int port, uint64_t *data)
+{
+       _mv88e6xxx_get_ethtool_stats(ds, mv88e6xxx_get_sset_count(ds),
+                                    mv88e6xxx_hw_stats, port, data);
+}
+
 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
 {
        return 32 * sizeof(u16);
@@ -560,42 +698,754 @@ int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
 
        mutex_lock(&ps->phy_mutex);
 
-       ret = mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
        if (ret < 0)
                goto error;
 
        /* Enable temperature sensor */
-       ret = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+       ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
        if (ret < 0)
                goto error;
 
-       ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
        if (ret < 0)
                goto error;
 
        /* Wait for temperature to stabilize */
        usleep_range(10000, 12000);
 
-       val = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+       val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
        if (val < 0) {
                ret = val;
                goto error;
        }
 
        /* Disable temperature sensor */
-       ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
        if (ret < 0)
                goto error;
 
        *temp = ((val & 0x1f) - 5) * 5;
 
 error:
-       mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+       _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
        mutex_unlock(&ps->phy_mutex);
        return ret;
 }
 #endif /* CONFIG_NET_DSA_HWMON */
 
+/* Poll (reg, offset) until every bit in @mask reads back clear,
+ * giving up after roughly 100ms.  Returns 0 once clear, -ETIMEDOUT
+ * otherwise.  NOTE(review): REG_READ appears to be a macro that can
+ * return early from the enclosing function on SMI error -- confirm.
+ */
+static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+{
+       unsigned long deadline = jiffies + HZ / 10;
+
+       while (time_before(jiffies, deadline)) {
+               int val = REG_READ(reg, offset);
+
+               if (!(val & mask))
+                       return 0;
+
+               usleep_range(1000, 2000);
+       }
+       return -ETIMEDOUT;
+}
+
+/* Wait for an indirect PHY access through Global2 to finish. */
+int mv88e6xxx_phy_wait(struct dsa_switch *ds)
+{
+       return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+                             GLOBAL2_SMI_OP_BUSY);
+}
+
+/* Wait for the EEPROM load cycle to finish. */
+int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
+{
+       return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+                             GLOBAL2_EEPROM_OP_LOAD);
+}
+
+/* Wait for any in-flight EEPROM operation to finish. */
+int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
+{
+       return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+                             GLOBAL2_EEPROM_OP_BUSY);
+}
+
+/* Locked variant of mv88e6xxx_wait(): propagates read errors
+ * instead of bailing out through the REG_READ macro.
+ * Must be called with SMI lock held.
+ */
+static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+{
+       unsigned long deadline = jiffies + HZ / 10;
+
+       while (time_before(jiffies, deadline)) {
+               int val = _mv88e6xxx_reg_read(ds, reg, offset);
+
+               if (val < 0)
+                       return val;
+               if (!(val & mask))
+                       return 0;
+
+               usleep_range(1000, 2000);
+       }
+       return -ETIMEDOUT;
+}
+
+/* Wait for the ATU to go idle.  Must be called with SMI lock held. */
+static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
+{
+       return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
+                              GLOBAL_ATU_OP_BUSY);
+}
+
+/* Issue a clause-22 PHY read through the Global2 SMI mediation
+ * registers and return the data word (or a negative error).
+ * NOTE(review): REG_WRITE/REG_READ appear to be macros that can
+ * return early from this function on SMI error -- confirm.
+ * Must be called with phy mutex held.
+ */
+static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
+                                       int regnum)
+{
+       int ret;
+
+       REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
+                 GLOBAL2_SMI_OP_22_READ | (addr << 5) | regnum);
+
+       ret = mv88e6xxx_phy_wait(ds);
+       if (ret < 0)
+               return ret;
+
+       return REG_READ(REG_GLOBAL2, GLOBAL2_SMI_DATA);
+}
+
+/* Issue a clause-22 PHY write through the Global2 SMI mediation
+ * registers; the data word is loaded before the command is posted.
+ * Must be called with phy mutex held.
+ */
+static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
+                                        int regnum, u16 val)
+{
+       REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+       REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
+                 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | regnum);
+
+       return mv88e6xxx_phy_wait(ds);
+}
+
+/* Report the port's EEE configuration and link status.
+ * Bits 9/8 of PHY register 16 hold the EEE / TX-LPI enables
+ * (written symmetrically by mv88e6xxx_set_eee() below).
+ * Returns 0 on success or a negative error code.
+ */
+int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int err;
+
+       mutex_lock(&ps->phy_mutex);
+
+       err = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+       if (err < 0)
+               goto out;
+
+       e->eee_enabled = !!(err & 0x0200);
+       e->tx_lpi_enabled = !!(err & 0x0100);
+
+       err = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+       if (err < 0)
+               goto out;
+
+       e->eee_active = !!(err & PORT_STATUS_EEE);
+       err = 0;
+
+out:
+       mutex_unlock(&ps->phy_mutex);
+       return err;
+}
+
+/* Program the port's EEE / TX-LPI enables via a read-modify-write
+ * of bits 9/8 in PHY register 16.
+ */
+int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
+                     struct phy_device *phydev, struct ethtool_eee *e)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int err;
+       int val;
+
+       mutex_lock(&ps->phy_mutex);
+
+       err = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+       if (err < 0)
+               goto out;
+
+       val = err & ~0x0300;
+       if (e->eee_enabled)
+               val |= 0x0200;
+       if (e->tx_lpi_enabled)
+               val |= 0x0100;
+
+       err = _mv88e6xxx_phy_write_indirect(ds, port, 16, val);
+out:
+       mutex_unlock(&ps->phy_mutex);
+
+       return err;
+}
+
+/* Post an ATU operation against address database @fid and wait for
+ * completion.  Must be called with the SMI lock held.
+ */
+static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
+{
+       int ret;
+
+       /* NOTE(review): global register 0x01 is presumably the ATU FID
+        * register on these devices -- confirm; the header names global
+        * 0x01 GLOBAL_MAC_01, so a dedicated define would avoid the
+        * raw offset here.
+        */
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid);
+       if (ret < 0)
+               return ret;
+
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
+       if (ret < 0)
+               return ret;
+
+       return _mv88e6xxx_atu_wait(ds);
+}
+
+/* Flush all non-static entries from address database @fid.
+ * Must be called with the SMI lock held.
+ */
+static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
+{
+       int ret;
+
+       ret = _mv88e6xxx_atu_wait(ds);
+       if (ret < 0)
+               return ret;
+
+       return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB);
+}
+
+/* Move @port to the given PORT_CONTROL_STATE_* value, flushing the
+ * port's address database when it leaves the Learning/Forwarding
+ * states.  Returns 0 on success or a negative error code.
+ *
+ * Fixes: ret was returned uninitialized both when the PORT_CONTROL
+ * read failed and when the port was already in the requested state.
+ */
+static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int reg, ret = 0;
+       u8 oldstate;
+
+       mutex_lock(&ps->smi_mutex);
+
+       reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
+       if (reg < 0) {
+               ret = reg;
+               goto abort;
+       }
+
+       oldstate = reg & PORT_CONTROL_STATE_MASK;
+       if (oldstate != state) {
+               /* Flush forwarding database if we're moving a port
+                * from Learning or Forwarding state to Disabled or
+                * Blocking or Listening state.
+                */
+               if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
+                   state <= PORT_CONTROL_STATE_BLOCKING) {
+                       ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
+                       if (ret)
+                               goto abort;
+               }
+               reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
+                                          reg);
+       }
+
+abort:
+       mutex_unlock(&ps->smi_mutex);
+       return ret;
+}
+
+/* Rewrite @port's port-based VLAN map from the cached FID and
+ * bridge masks.  Must be called with smi lock held.
+ */
+static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       u8 fid = ps->fid[port];
+       u16 vlan_map = fid << 12;
+
+       /* The CPU port may reach every port; other ports may only
+        * reach their bridge group plus the upstream port, never
+        * themselves.
+        */
+       if (dsa_is_cpu_port(ds, port))
+               vlan_map |= ds->phys_port_mask;
+       else
+               vlan_map |= (ps->bridge_mask[fid] |
+                           (1 << dsa_upstream_port(ds))) & ~(1 << port);
+
+       return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN,
+                                   vlan_map);
+}
+
+/* Reprogram every port that belongs to @fid, then flush the
+ * database's dynamic entries.  Must be called with smi lock held.
+ */
+static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       u32 todo = ds->phys_port_mask;
+       int err;
+
+       while (todo) {
+               int port = __ffs(todo);
+
+               todo &= ~(1 << port);
+               if (ps->fid[port] == fid) {
+                       err = _mv88e6xxx_update_port_config(ds, port);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return _mv88e6xxx_flush_fid(ds, fid);
+}
+
+/* Bridge handling functions */
+
+/* Attach @port to the bridge described by @br_port_mask by sharing
+ * the FID of an existing member (or keeping its own FID when it is
+ * the first member).  Returns 0 on success, -EINVAL when the cached
+ * bridge state disagrees with the caller's mask, or a negative
+ * register-access error.
+ */
+int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret = 0;
+       u32 nmask;
+       int fid;
+
+       /* If the bridge group is not empty, join that group.
+        * Otherwise create a new group.
+        */
+       fid = ps->fid[port];
+       nmask = br_port_mask & ~(1 << port);
+       if (nmask)
+               fid = ps->fid[__ffs(nmask)];
+
+       /* Sanity check: the group plus this port must equal the mask
+        * the bridge layer handed us.
+        */
+       nmask = ps->bridge_mask[fid] | (1 << port);
+       if (nmask != br_port_mask) {
+               netdev_err(ds->ports[port],
+                          "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
+                          fid, br_port_mask, nmask);
+               return -EINVAL;
+       }
+
+       mutex_lock(&ps->smi_mutex);
+
+       ps->bridge_mask[fid] = br_port_mask;
+
+       if (fid != ps->fid[port]) {
+               /* Return the port's old private FID to the free pool
+                * before adopting the bridge's FID.
+                */
+               ps->fid_mask |= 1 << ps->fid[port];
+               ps->fid[port] = fid;
+               ret = _mv88e6xxx_update_bridge_config(ds, fid);
+       }
+
+       mutex_unlock(&ps->smi_mutex);
+
+       return ret;
+}
+
+/* Detach @port from its bridge group.  Unless the port was the last
+ * member, it is moved onto a freshly allocated FID of its own and
+ * both the old and new address databases are rebuilt.
+ * Returns 0 on success or a negative error code.
+ *
+ * Fixes: the free-FID bookkeeping used "fid_mask &= (1 << newfid)",
+ * which discarded every other free FID while leaving the newly
+ * allocated one marked as free; the allocated bit must be cleared.
+ */
+int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       u8 fid, newfid;
+       int ret;
+
+       fid = ps->fid[port];
+
+       if (ps->bridge_mask[fid] != br_port_mask) {
+               netdev_err(ds->ports[port],
+                          "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
+                          fid, br_port_mask, ps->bridge_mask[fid]);
+               return -EINVAL;
+       }
+
+       /* If the port was the last port of a bridge, we are done.
+        * Otherwise assign a new fid to the port, and fix up
+        * the bridge configuration.
+        */
+       if (br_port_mask == (1 << port))
+               return 0;
+
+       mutex_lock(&ps->smi_mutex);
+
+       newfid = __ffs(ps->fid_mask);
+       ps->fid[port] = newfid;
+       /* Remove only the newly allocated FID from the free pool. */
+       ps->fid_mask &= ~(1 << newfid);
+       ps->bridge_mask[fid] &= ~(1 << port);
+       ps->bridge_mask[newfid] = 1 << port;
+
+       ret = _mv88e6xxx_update_bridge_config(ds, fid);
+       if (!ret)
+               ret = _mv88e6xxx_update_bridge_config(ds, newfid);
+
+       mutex_unlock(&ps->smi_mutex);
+
+       return ret;
+}
+
+/* Record the bridge layer's STP state for @port and kick the worker
+ * that will program it.  Unknown states map to Forwarding.
+ * Always returns 0.
+ */
+int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int hw_state;
+
+       switch (state) {
+       case BR_STATE_DISABLED:
+               hw_state = PORT_CONTROL_STATE_DISABLED;
+               break;
+       case BR_STATE_BLOCKING:
+       case BR_STATE_LISTENING:
+               hw_state = PORT_CONTROL_STATE_BLOCKING;
+               break;
+       case BR_STATE_LEARNING:
+               hw_state = PORT_CONTROL_STATE_LEARNING;
+               break;
+       case BR_STATE_FORWARDING:
+       default:
+               hw_state = PORT_CONTROL_STATE_FORWARDING;
+               break;
+       }
+
+       netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, hw_state);
+
+       /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
+        * so we can not update the port state directly but need to schedule it.
+        */
+       ps->port_state[port] = hw_state;
+       set_bit(port, &ps->port_state_update_mask);
+       schedule_work(&ps->bridge_work);
+
+       return 0;
+}
+
+/* Load a 48-bit MAC address into the three GLOBAL_ATU_MAC word
+ * registers (big-endian byte pairs).
+ */
+static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
+                                 const unsigned char *addr)
+{
+       int i;
+
+       for (i = 0; i < 3; i++) {
+               u16 word = (addr[i * 2] << 8) | addr[i * 2 + 1];
+               int err = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
+                                              GLOBAL_ATU_MAC_01 + i, word);
+
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+
+/* Read a 48-bit MAC address back out of the GLOBAL_ATU_MAC word
+ * registers into @addr.
+ */
+static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
+{
+       int i;
+
+       for (i = 0; i < 3; i++) {
+               int word = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+                                              GLOBAL_ATU_MAC_01 + i);
+
+               if (word < 0)
+                       return word;
+               addr[i * 2] = word >> 8;
+               addr[i * 2 + 1] = word & 0xff;
+       }
+
+       return 0;
+}
+
+/* Load/purge an ATU entry for @addr in @port's database with the
+ * given GLOBAL_ATU_DATA_STATE_* value.  Must be called with the
+ * SMI lock held.
+ */
+static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
+                                   const unsigned char *addr, int state)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       u8 fid = ps->fid[port];
+       int err;
+
+       err = _mv88e6xxx_atu_wait(ds);
+       if (err < 0)
+               return err;
+
+       err = __mv88e6xxx_write_addr(ds, addr);
+       if (err < 0)
+               return err;
+
+       /* Destination port vector starts at bit 4; the low nibble
+        * carries the entry state.
+        */
+       err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
+                                  (0x10 << port) | state);
+       if (err)
+               return err;
+
+       return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
+}
+
+/* Install a static unicast/multicast FDB entry for @addr on @port. */
+int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+                          const unsigned char *addr, u16 vid)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int state = is_multicast_ether_addr(addr) ?
+               GLOBAL_ATU_DATA_STATE_MC_STATIC :
+               GLOBAL_ATU_DATA_STATE_UC_STATIC;
+       int err;
+
+       mutex_lock(&ps->smi_mutex);
+       err = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state);
+       mutex_unlock(&ps->smi_mutex);
+
+       return err;
+}
+
+/* Remove the FDB entry for @addr by loading it as UNUSED. */
+int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
+                          const unsigned char *addr, u16 vid)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int err;
+
+       mutex_lock(&ps->smi_mutex);
+       err = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
+                                      GLOBAL_ATU_DATA_STATE_UNUSED);
+       mutex_unlock(&ps->smi_mutex);
+
+       return err;
+}
+
+/* Find the next ATU entry after @addr that includes @port in its
+ * destination vector.  On success the entry's MAC is written back
+ * into @addr and *is_static reports whether it is a static entry;
+ * returns -ENOENT when the end of the database is reached.
+ * Must be called with the SMI lock held.
+ */
+static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
+                                   unsigned char *addr, bool *is_static)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       u8 fid = ps->fid[port];
+       int ret, state;
+
+       ret = _mv88e6xxx_atu_wait(ds);
+       if (ret < 0)
+               return ret;
+
+       /* Seed the search: GET_NEXT starts from the address currently
+        * loaded in the ATU MAC registers.
+        */
+       ret = __mv88e6xxx_write_addr(ds, addr);
+       if (ret < 0)
+               return ret;
+
+       /* Step through entries, skipping those whose destination port
+        * vector (bits 4..11 of GLOBAL_ATU_DATA) does not include us.
+        */
+       do {
+               ret = _mv88e6xxx_atu_cmd(ds, fid,  GLOBAL_ATU_OP_GET_NEXT_DB);
+               if (ret < 0)
+                       return ret;
+
+               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
+               if (ret < 0)
+                       return ret;
+               state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+               if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
+                       return -ENOENT;
+       } while (!(((ret >> 4) & 0xff) & (1 << port)));
+
+       ret = __mv88e6xxx_read_addr(ds, addr);
+       if (ret < 0)
+               return ret;
+
+       /* "Static" is encoded differently for unicast and multicast. */
+       *is_static = state == (is_multicast_ether_addr(addr) ?
+                              GLOBAL_ATU_DATA_STATE_MC_STATIC :
+                              GLOBAL_ATU_DATA_STATE_UC_STATIC);
+
+       return 0;
+}
+
+/* Locked wrapper around __mv88e6xxx_port_getnext(): fetch the next
+ * FDB entry for @port after *addr.
+ */
+int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
+                              unsigned char *addr, bool *is_static)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int err;
+
+       mutex_lock(&ps->smi_mutex);
+       err = __mv88e6xxx_port_getnext(ds, port, addr, is_static);
+       mutex_unlock(&ps->smi_mutex);
+
+       return err;
+}
+
+/* Deferred worker that applies the port states recorded by
+ * mv88e6xxx_port_stp_update(), which may run with softirqs disabled
+ * and therefore cannot perform the (sleeping) register accesses
+ * itself.
+ */
+static void mv88e6xxx_bridge_work(struct work_struct *work)
+{
+       struct mv88e6xxx_priv_state *ps;
+       struct dsa_switch *ds;
+       int port;
+
+       ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
+       /* NOTE(review): this assumes the priv area is laid out
+        * immediately after the struct dsa_switch -- confirm against
+        * the DSA core's allocation before relying on it.
+        */
+       ds = ((struct dsa_switch *)ps) - 1;
+
+       while (ps->port_state_update_mask) {
+               port = __ffs(ps->port_state_update_mask);
+               clear_bit(port, &ps->port_state_update_mask);
+               mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
+       }
+}
+
+/* Per-port setup shared by all mv88e6xxx drivers: assign the port a
+ * private address database (FID) and program its port-based VLAN
+ * map.  Returns 0 on success or a negative error code.
+ *
+ * Fixes: the first register write targeted PORT_DEFAULT_VLAN (0x07)
+ * although the comment documents Port Control 1 (0x05), leaving
+ * Port Control 1 untouched and writing the default-VLAN register
+ * twice; it now targets PORT_CONTROL_1, and the final write uses the
+ * PORT_DEFAULT_VLAN define instead of a raw 0x07.
+ */
+int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret, fid;
+
+       mutex_lock(&ps->smi_mutex);
+
+       /* Port Control 1: disable trunking, disable sending
+        * learning messages to this port.
+        */
+       ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1,
+                                  0x0000);
+       if (ret)
+               goto abort;
+
+       /* Port based VLAN map: give each port its own address
+        * database, allow the CPU port to talk to each of the 'real'
+        * ports, and allow each of the 'real' ports to only talk to
+        * the upstream port.
+        */
+       fid = __ffs(ps->fid_mask);
+       ps->fid[port] = fid;
+       ps->fid_mask &= ~(1 << fid);
+
+       if (!dsa_is_cpu_port(ds, port))
+               ps->bridge_mask[fid] = 1 << port;
+
+       ret = _mv88e6xxx_update_port_config(ds, port);
+       if (ret)
+               goto abort;
+
+       /* Default VLAN ID and priority: don't set a default VLAN
+        * ID, and set the default packet priority to zero.
+        */
+       ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
+                                  0x0000);
+abort:
+       mutex_unlock(&ps->smi_mutex);
+       return ret;
+}
+
+/* One-time initialisation shared by all mv88e6xxx drivers: mutexes,
+ * switch model probe, the free-FID pool and the deferred bridge
+ * worker.  Always returns 0.
+ */
+int mv88e6xxx_setup_common(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       mutex_init(&ps->smi_mutex);
+       mutex_init(&ps->stats_mutex);
+       mutex_init(&ps->phy_mutex);
+
+       /* Mask off the low nibble: PORT_SWITCH_ID_*_Ax values differ
+        * from the base ID only there, so ps->id identifies the model
+        * regardless of silicon revision.
+        */
+       ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
+
+       /* All FIDs start out free. */
+       ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;
+
+       INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
+
+       return 0;
+}
+
+/* Disable all ports, drain the transmit queues, soft-reset the
+ * switch (optionally keeping the PPU running for indirect PHY
+ * access) and wait up to a second for completion.
+ */
+int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       /* NOTE(review): the completion bit patterns read back from
+        * GLOBAL_STATUS differ depending on whether the PPU stays
+        * enabled -- confirm against the datasheet.
+        */
+       u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
+       unsigned long timeout;
+       int ret;
+       int i;
+
+       /* Set all ports to the disabled state. */
+       for (i = 0; i < ps->num_ports; i++) {
+               ret = REG_READ(REG_PORT(i), PORT_CONTROL);
+               /* 0xfffc clears PORT_CONTROL_STATE_MASK -> Disabled. */
+               REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
+       }
+
+       /* Wait for transmit queues to drain. */
+       usleep_range(2000, 4000);
+
+       /* Reset the switch. Keep the PPU active if requested. The PPU
+        * needs to be active to support indirect phy register access
+        * through global registers 0x18 and 0x19.
+        */
+       /* 0x04 is GLOBAL_CONTROL; 0xc000 sets SW_RESET|PPU_ENABLE.
+        * NOTE(review): 0xc400 additionally sets bit 10 (named
+        * MAX_FRAME_1632 for the 6152 in the header) -- confirm the
+        * intended meaning for the non-PPU path.
+        */
+       if (ppu_active)
+               REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
+       else
+               REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+
+       /* Wait up to one second for reset to complete. */
+       timeout = jiffies + 1 * HZ;
+       while (time_before(jiffies, timeout)) {
+               /* 0x00 is GLOBAL_STATUS. */
+               ret = REG_READ(REG_GLOBAL, 0x00);
+               if ((ret & is_reset) == is_reset)
+                       break;
+               usleep_range(1000, 2000);
+       }
+       if (time_after(jiffies, timeout))
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+/* Read @reg from @page of the port's PHY.  The page register (0x16)
+ * is restored to page 0 afterwards, even on error.
+ */
+int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int err;
+
+       mutex_lock(&ps->phy_mutex);
+       err = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+       if (err >= 0)
+               err = _mv88e6xxx_phy_read_indirect(ds, port, reg);
+       _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+       mutex_unlock(&ps->phy_mutex);
+       return err;
+}
+
+/* Write @val to @reg on @page of the port's PHY, restoring the page
+ * register (0x16) to page 0 afterwards, even on error.
+ */
+int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
+                            int reg, int val)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int err;
+
+       mutex_lock(&ps->phy_mutex);
+       err = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+       if (err >= 0)
+               err = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
+       _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+       mutex_unlock(&ps->phy_mutex);
+       return err;
+}
+
+/* Map a front-panel port number to its PHY address (they coincide
+ * on these devices); -EINVAL for out-of-range ports.
+ */
+static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       if (port < 0 || port >= ps->num_ports)
+               return -EINVAL;
+
+       return port;
+}
+
+/* PHY accessors for the DSA core: translate the port number to a
+ * PHY address and perform the access under the phy mutex.
+ */
+int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+       int err;
+
+       if (addr < 0)
+               return addr;
+
+       mutex_lock(&ps->phy_mutex);
+       err = _mv88e6xxx_phy_read(ds, addr, regnum);
+       mutex_unlock(&ps->phy_mutex);
+       return err;
+}
+
+int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+       int err;
+
+       if (addr < 0)
+               return addr;
+
+       mutex_lock(&ps->phy_mutex);
+       err = _mv88e6xxx_phy_write(ds, addr, regnum, val);
+       mutex_unlock(&ps->phy_mutex);
+       return err;
+}
+
+/* Same pair, but going through the Global2 SMI mediation registers. */
+int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+       int err;
+
+       if (addr < 0)
+               return addr;
+
+       mutex_lock(&ps->phy_mutex);
+       err = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
+       mutex_unlock(&ps->phy_mutex);
+       return err;
+}
+
+int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
+                            u16 val)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+       int err;
+
+       if (addr < 0)
+               return addr;
+
+       mutex_lock(&ps->phy_mutex);
+       err = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
+       mutex_unlock(&ps->phy_mutex);
+       return err;
+}
+
 static int __init mv88e6xxx_init(void)
 {
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
index 03e397efde36949810503a16b601f3df61f3396c..e045154f33646692cb292150efde8e0ad78a9326 100644 (file)
 #ifndef __MV88E6XXX_H
 #define __MV88E6XXX_H
 
+#define SMI_CMD                        0x00
+#define SMI_CMD_BUSY           BIT(15)
+#define SMI_CMD_CLAUSE_22      BIT(12)
+#define SMI_CMD_OP_22_WRITE    ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
+#define SMI_CMD_OP_22_READ     ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
+#define SMI_CMD_OP_45_WRITE_ADDR       ((0 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_WRITE_DATA       ((1 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_READ_DATA                ((2 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_READ_DATA_INC    ((3 << 10) | SMI_CMD_BUSY)
+#define SMI_DATA               0x01
+
 #define REG_PORT(p)            (0x10 + (p))
+#define PORT_STATUS            0x00
+#define PORT_STATUS_PAUSE_EN   BIT(15)
+#define PORT_STATUS_MY_PAUSE   BIT(14)
+#define PORT_STATUS_HD_FLOW    BIT(13)
+#define PORT_STATUS_PHY_DETECT BIT(12)
+#define PORT_STATUS_LINK       BIT(11)
+#define PORT_STATUS_DUPLEX     BIT(10)
+#define PORT_STATUS_SPEED_MASK 0x0300
+#define PORT_STATUS_SPEED_10   0x0000
+#define PORT_STATUS_SPEED_100  0x0100
+#define PORT_STATUS_SPEED_1000 0x0200
+#define PORT_STATUS_EEE                BIT(6) /* 6352 */
+#define PORT_STATUS_AM_DIS     BIT(6) /* 6165 */
+#define PORT_STATUS_MGMII      BIT(6) /* 6185 */
+#define PORT_STATUS_TX_PAUSED  BIT(5)
+#define PORT_STATUS_FLOW_CTRL  BIT(4)
+#define PORT_PCS_CTRL          0x01
+#define PORT_SWITCH_ID         0x03
+#define PORT_SWITCH_ID_6085    0x04a0
+#define PORT_SWITCH_ID_6095    0x0950
+#define PORT_SWITCH_ID_6123    0x1210
+#define PORT_SWITCH_ID_6123_A1 0x1212
+#define PORT_SWITCH_ID_6123_A2 0x1213
+#define PORT_SWITCH_ID_6131    0x1060
+#define PORT_SWITCH_ID_6131_B2 0x1066
+#define PORT_SWITCH_ID_6152    0x1a40
+#define PORT_SWITCH_ID_6155    0x1a50
+#define PORT_SWITCH_ID_6161    0x1610
+#define PORT_SWITCH_ID_6161_A1 0x1612
+#define PORT_SWITCH_ID_6161_A2 0x1613
+#define PORT_SWITCH_ID_6165    0x1650
+#define PORT_SWITCH_ID_6165_A1 0x1652
+#define PORT_SWITCH_ID_6165_A2 0x1653
+#define PORT_SWITCH_ID_6171    0x1710
+#define PORT_SWITCH_ID_6172    0x1720
+#define PORT_SWITCH_ID_6176    0x1760
+#define PORT_SWITCH_ID_6182    0x1a60
+#define PORT_SWITCH_ID_6185    0x1a70
+#define PORT_SWITCH_ID_6352    0x3520
+#define PORT_SWITCH_ID_6352_A0 0x3521
+#define PORT_SWITCH_ID_6352_A1 0x3522
+#define PORT_CONTROL           0x04
+#define PORT_CONTROL_STATE_MASK                0x03
+#define PORT_CONTROL_STATE_DISABLED    0x00
+#define PORT_CONTROL_STATE_BLOCKING    0x01
+#define PORT_CONTROL_STATE_LEARNING    0x02
+#define PORT_CONTROL_STATE_FORWARDING  0x03
+#define PORT_CONTROL_1         0x05
+#define PORT_BASE_VLAN         0x06
+#define PORT_DEFAULT_VLAN      0x07
+#define PORT_CONTROL_2         0x08
+#define PORT_RATE_CONTROL      0x09
+#define PORT_RATE_CONTROL_2    0x0a
+#define PORT_ASSOC_VECTOR      0x0b
+#define PORT_IN_DISCARD_LO     0x10
+#define PORT_IN_DISCARD_HI     0x11
+#define PORT_IN_FILTERED       0x12
+#define PORT_OUT_FILTERED      0x13
+#define PORT_TAG_REGMAP_0123   0x19
+#define PORT_TAG_REGMAP_4567   0x1a
+
 #define REG_GLOBAL             0x1b
+#define GLOBAL_STATUS          0x00
+#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */
+/* Two bits for 6165, 6185 etc */
+#define GLOBAL_STATUS_PPU_MASK         (0x3 << 14)
+#define GLOBAL_STATUS_PPU_DISABLED_RST (0x0 << 14)
+#define GLOBAL_STATUS_PPU_INITIALIZING (0x1 << 14)
+#define GLOBAL_STATUS_PPU_DISABLED     (0x2 << 14)
+#define GLOBAL_STATUS_PPU_POLLING      (0x3 << 14)
+#define GLOBAL_MAC_01          0x01
+#define GLOBAL_MAC_23          0x02
+#define GLOBAL_MAC_45          0x03
+#define GLOBAL_CONTROL         0x04
+#define GLOBAL_CONTROL_SW_RESET                BIT(15)
+#define GLOBAL_CONTROL_PPU_ENABLE      BIT(14)
+#define GLOBAL_CONTROL_DISCARD_EXCESS  BIT(13) /* 6352 */
+#define GLOBAL_CONTROL_SCHED_PRIO      BIT(11) /* 6152 */
+#define GLOBAL_CONTROL_MAX_FRAME_1632  BIT(10) /* 6152 */
+#define GLOBAL_CONTROL_RELOAD_EEPROM   BIT(9)  /* 6152 */
+#define GLOBAL_CONTROL_DEVICE_EN       BIT(7)
+#define GLOBAL_CONTROL_STATS_DONE_EN   BIT(6)
+#define GLOBAL_CONTROL_VTU_PROBLEM_EN  BIT(5)
+#define GLOBAL_CONTROL_VTU_DONE_EN     BIT(4)
+#define GLOBAL_CONTROL_ATU_PROBLEM_EN  BIT(3)
+#define GLOBAL_CONTROL_ATU_DONE_EN     BIT(2)
+#define GLOBAL_CONTROL_TCAM_EN         BIT(1)
+#define GLOBAL_CONTROL_EEPROM_DONE_EN  BIT(0)
+#define GLOBAL_VTU_OP          0x05
+#define GLOBAL_VTU_VID         0x06
+#define GLOBAL_VTU_DATA_0_3    0x07
+#define GLOBAL_VTU_DATA_4_7    0x08
+#define GLOBAL_VTU_DATA_8_11   0x09
+#define GLOBAL_ATU_CONTROL     0x0a
+#define GLOBAL_ATU_OP          0x0b
+#define GLOBAL_ATU_OP_BUSY     BIT(15)
+#define GLOBAL_ATU_OP_NOP              (0 << 12)
+#define GLOBAL_ATU_OP_FLUSH_ALL                ((1 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_NON_STATIC ((2 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_LOAD_DB          ((3 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_NEXT_DB      ((4 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_DB         ((5 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_CLR_VIOLATION          ((7 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_DATA                0x0c
+#define GLOBAL_ATU_DATA_STATE_MASK             0x0f
+#define GLOBAL_ATU_DATA_STATE_UNUSED           0x00
+#define GLOBAL_ATU_DATA_STATE_UC_MGMT          0x0d
+#define GLOBAL_ATU_DATA_STATE_UC_STATIC                0x0e
+#define GLOBAL_ATU_DATA_STATE_UC_PRIO_OVER     0x0f
+#define GLOBAL_ATU_DATA_STATE_MC_NONE_RATE     0x05
+#define GLOBAL_ATU_DATA_STATE_MC_STATIC                0x07
+#define GLOBAL_ATU_DATA_STATE_MC_MGMT          0x0e
+#define GLOBAL_ATU_DATA_STATE_MC_PRIO_OVER     0x0f
+#define GLOBAL_ATU_MAC_01      0x0d
+#define GLOBAL_ATU_MAC_23      0x0e
+#define GLOBAL_ATU_MAC_45      0x0f
+#define GLOBAL_IP_PRI_0                0x10
+#define GLOBAL_IP_PRI_1                0x11
+#define GLOBAL_IP_PRI_2                0x12
+#define GLOBAL_IP_PRI_3                0x13
+#define GLOBAL_IP_PRI_4                0x14
+#define GLOBAL_IP_PRI_5                0x15
+#define GLOBAL_IP_PRI_6                0x16
+#define GLOBAL_IP_PRI_7                0x17
+#define GLOBAL_IEEE_PRI                0x18
+#define GLOBAL_CORE_TAG_TYPE   0x19
+#define GLOBAL_MONITOR_CONTROL 0x1a
+#define GLOBAL_CONTROL_2       0x1c
+#define GLOBAL_STATS_OP                0x1d
+#define GLOBAL_STATS_OP_BUSY   BIT(15)
+#define GLOBAL_STATS_OP_NOP            (0 << 12)
+#define GLOBAL_STATS_OP_FLUSH_ALL      ((1 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_FLUSH_PORT     ((2 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_READ_CAPTURED  ((4 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_CAPTURE_PORT   ((5 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_RX                ((1 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_TX                ((2 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_RX_TX     ((3 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_COUNTER_32        0x1e
+#define GLOBAL_STATS_COUNTER_01        0x1f
+
 #define REG_GLOBAL2            0x1c
+#define GLOBAL2_INT_SOURCE     0x00
+#define GLOBAL2_INT_MASK       0x01
+#define GLOBAL2_MGMT_EN_2X     0x02
+#define GLOBAL2_MGMT_EN_0X     0x03
+#define GLOBAL2_FLOW_CONTROL   0x04
+#define GLOBAL2_SWITCH_MGMT    0x05
+#define GLOBAL2_DEVICE_MAPPING 0x06
+#define GLOBAL2_TRUNK_MASK     0x07
+#define GLOBAL2_TRUNK_MAPPING  0x08
+#define GLOBAL2_INGRESS_OP     0x09
+#define GLOBAL2_INGRESS_DATA   0x0a
+#define GLOBAL2_PVT_ADDR       0x0b
+#define GLOBAL2_PVT_DATA       0x0c
+#define GLOBAL2_SWITCH_MAC     0x0d
+#define GLOBAL2_SWITCH_MAC_BUSY BIT(15)
+#define GLOBAL2_ATU_STATS      0x0e
+#define GLOBAL2_PRIO_OVERRIDE  0x0f
+#define GLOBAL2_EEPROM_OP      0x14
+#define GLOBAL2_EEPROM_OP_BUSY BIT(15)
+#define GLOBAL2_EEPROM_OP_LOAD BIT(11)
+#define GLOBAL2_EEPROM_DATA    0x15
+#define GLOBAL2_PTP_AVB_OP     0x16
+#define GLOBAL2_PTP_AVB_DATA   0x17
+#define GLOBAL2_SMI_OP         0x18
+#define GLOBAL2_SMI_OP_BUSY            BIT(15)
+#define GLOBAL2_SMI_OP_CLAUSE_22       BIT(12)
+#define GLOBAL2_SMI_OP_22_WRITE                ((1 << 10) | GLOBAL2_SMI_OP_BUSY | \
+                                        GLOBAL2_SMI_OP_CLAUSE_22)
+#define GLOBAL2_SMI_OP_22_READ         ((2 << 10) | GLOBAL2_SMI_OP_BUSY | \
+                                        GLOBAL2_SMI_OP_CLAUSE_22)
+#define GLOBAL2_SMI_OP_45_WRITE_ADDR   ((0 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_OP_45_WRITE_DATA   ((1 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_OP_45_READ_DATA    ((2 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_DATA       0x19
+#define GLOBAL2_SCRATCH_MISC   0x1a
+#define GLOBAL2_WDOG_CONTROL   0x1b
+#define GLOBAL2_QOS_WEIGHT     0x1c
+#define GLOBAL2_MISC           0x1d
 
 struct mv88e6xxx_priv_state {
        /* When using multi-chip addressing, this mutex protects
@@ -49,6 +239,18 @@ struct mv88e6xxx_priv_state {
        struct mutex eeprom_mutex;
 
        int             id; /* switch product id */
+       int             num_ports;      /* number of switch ports */
+
+       /* hw bridging */
+
+       u32 fid_mask;
+       u8 fid[DSA_MAX_PORTS];
+       u16 bridge_mask[DSA_MAX_PORTS];
+
+       unsigned long port_state_update_mask;
+       u8 port_state[DSA_MAX_PORTS];
+
+       struct work_struct bridge_work;
 };
 
 struct mv88e6xxx_hw_stat {
@@ -57,6 +259,9 @@ struct mv88e6xxx_hw_stat {
        int reg;
 };
 
+int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
+int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port);
+int mv88e6xxx_setup_common(struct dsa_switch *ds);
 int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
 int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
@@ -65,24 +270,46 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
 int mv88e6xxx_config_prio(struct dsa_switch *ds);
 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
 int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
-int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum);
-int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val);
+int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
+int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val);
+int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum);
+int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
+                                u16 val);
 void mv88e6xxx_ppu_state_init(struct dsa_switch *ds);
 int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
 int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
                            int regnum, u16 val);
 void mv88e6xxx_poll_link(struct dsa_switch *ds);
-void mv88e6xxx_get_strings(struct dsa_switch *ds,
-                          int nr_stats, struct mv88e6xxx_hw_stat *stats,
-                          int port, uint8_t *data);
-void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
-                                int nr_stats, struct mv88e6xxx_hw_stat *stats,
-                                int port, uint64_t *data);
+void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
+void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
+                                uint64_t *data);
+int mv88e6xxx_get_sset_count(struct dsa_switch *ds);
+int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
                        struct ethtool_regs *regs, void *_p);
 int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
-
+int mv88e6xxx_phy_wait(struct dsa_switch *ds);
+int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
+int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
+int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
+int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
+                                u16 val);
+int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
+                     struct phy_device *phydev, struct ethtool_eee *e);
+int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
+int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
+int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
+int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+                          const unsigned char *addr, u16 vid);
+int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
+                          const unsigned char *addr, u16 vid);
+int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
+                              unsigned char *addr, bool *is_static);
+int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
+int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
+                            int reg, int val);
 extern struct dsa_switch_driver mv88e6131_switch_driver;
 extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
 extern struct dsa_switch_driver mv88e6352_switch_driver;
index ec20611e9de2e4eb16e962f037cb55ae9a6e7d62..096531a7312495006abdc4b81dc03d7df6def169 100644 (file)
@@ -983,10 +983,9 @@ static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return 0;
 }
 
-static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        u64 ns;
-       u32 remainder;
        unsigned long flags;
        struct bfin_mac_local *lp =
                container_of(ptp, struct bfin_mac_local, caps);
@@ -997,21 +996,20 @@ static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 
        spin_unlock_irqrestore(&lp->phc_lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
+
        return 0;
 }
 
 static int bfin_ptp_settime(struct ptp_clock_info *ptp,
-                          const struct timespec *ts)
+                          const struct timespec64 *ts)
 {
        u64 ns;
        unsigned long flags;
        struct bfin_mac_local *lp =
                container_of(ptp, struct bfin_mac_local, caps);
 
-       ns = ts->tv_sec * 1000000000ULL;
-       ns += ts->tv_nsec;
+       ns = timespec64_to_ns(ts);
 
        spin_lock_irqsave(&lp->phc_lock, flags);
 
@@ -1039,8 +1037,8 @@ static struct ptp_clock_info bfin_ptp_caps = {
        .pps            = 0,
        .adjfreq        = bfin_ptp_adjfreq,
        .adjtime        = bfin_ptp_adjtime,
-       .gettime        = bfin_ptp_gettime,
-       .settime        = bfin_ptp_settime,
+       .gettime64      = bfin_ptp_gettime,
+       .settime64      = bfin_ptp_settime,
        .enable         = bfin_ptp_enable,
 };
 
index 2b8bfeeee9cf07e3e995003dea1cd195f0257d3f..ae89de7deb132587e0ea0993f1ade6f01c19228d 100644 (file)
@@ -1588,7 +1588,7 @@ static int greth_of_remove(struct platform_device *of_dev)
        return 0;
 }
 
-static struct of_device_id greth_of_match[] = {
+static const struct of_device_id greth_of_match[] = {
        {
         .name = "GAISLER_ETHMAC",
         },
index f3470d96837a7fb0e59307b000855fe18a882af5..bab01c84916549e7599e34fe7a1c15bf2d10b456 100644 (file)
@@ -757,7 +757,7 @@ static void emac_shutdown(struct net_device *dev)
        /* Disable all interrupt */
        writel(0, db->membase + EMAC_INT_CTL_REG);
 
-       /* clear interupt status */
+       /* clear interrupt status */
        reg_val = readl(db->membase + EMAC_INT_STA_REG);
        writel(reg_val, db->membase + EMAC_INT_STA_REG);
 
index 6725dc00750bd6da367396bceb33adaac10842d0..79ea35869e1ee31ab7f6cbda2d5b8c676d04bb1c 100644 (file)
@@ -89,7 +89,7 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
 
 #define TXQUEUESTOP_THRESHHOLD 2
 
-static struct of_device_id altera_tse_ids[];
+static const struct of_device_id altera_tse_ids[];
 
 static inline u32 tse_tx_avail(struct altera_tse_private *priv)
 {
@@ -105,11 +105,11 @@ static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 
        /* set MDIO address */
        csrwr32((mii_id & 0x1f), priv->mac_dev,
-               tse_csroffs(mdio_phy0_addr));
+               tse_csroffs(mdio_phy1_addr));
 
        /* get the data */
        return csrrd32(priv->mac_dev,
-                      tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
+                      tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
 }
 
 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
@@ -120,10 +120,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 
        /* set MDIO address */
        csrwr32((mii_id & 0x1f), priv->mac_dev,
-               tse_csroffs(mdio_phy0_addr));
+               tse_csroffs(mdio_phy1_addr));
 
        /* write the data */
-       csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
+       csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
        return 0;
 }
 
@@ -1098,8 +1098,12 @@ static int tse_open(struct net_device *dev)
 
        spin_lock(&priv->mac_cfg_lock);
        ret = reset_mac(priv);
+       /* Note that reset_mac will fail if the clocks are gated by the PHY
+        * due to the PHY being put into isolation or power down mode.
+        * This is not an error if reset fails due to no clock.
+        */
        if (ret)
-               netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+               netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
 
        ret = init_mac(priv);
        spin_unlock(&priv->mac_cfg_lock);
@@ -1203,8 +1207,12 @@ static int tse_shutdown(struct net_device *dev)
        spin_lock(&priv->tx_lock);
 
        ret = reset_mac(priv);
+       /* Note that reset_mac will fail if the clocks are gated by the PHY
+        * due to the PHY being put into isolation or power down mode.
+        * This is not an error if reset fails due to no clock.
+        */
        if (ret)
-               netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+               netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
        priv->dmaops->reset_dma(priv);
        free_skbufs(dev);
 
@@ -1568,7 +1576,7 @@ static const struct altera_dmaops altera_dtype_msgdma = {
        .start_rxdma = msgdma_start_rxdma,
 };
 
-static struct of_device_id altera_tse_ids[] = {
+static const struct of_device_id altera_tse_ids[] = {
        { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
        { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
        { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
index 4c2ae22217804fd8ce8e6cc8a27c6cf2855abba2..94960055fa1f802fcee1debd7584b4b1e7ed0cd3 100644 (file)
@@ -723,13 +723,13 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
                         * the last correctly noting the error.
                         */
                        if(status & ERR_BIT) {
-                               /* reseting flags */
+                               /* resetting flags */
                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
                                goto err_next_pkt;
                        }
                        /* check for STP and ENP */
                        if(!((status & STP_BIT) && (status & ENP_BIT))){
-                               /* reseting flags */
+                               /* resetting flags */
                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
                                goto err_next_pkt;
                        }
index a75092d584cc9751fdd37197e7d8bc626867bd94..7cdb185124073022cddb73ece5ae33decc90be53 100644 (file)
@@ -614,7 +614,7 @@ typedef enum {
 /* Assume contoller gets data 10 times the maximum processing time */
 #define  REPEAT_CNT                    10
 
-/* amd8111e decriptor flag definitions */
+/* amd8111e descriptor flag definitions */
 typedef enum {
 
        OWN_BIT         =       (1 << 15),
index 15a8190a6f75f61908f5ba968d37451e2f982897..bc8b04f4288286d0bf253417b08c7fa6fe1e59ca 100644 (file)
@@ -1735,7 +1735,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
        /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
        if (!is_valid_ether_addr(dev->dev_addr))
-               memset(dev->dev_addr, 0, ETH_ALEN);
+               eth_zero_addr(dev->dev_addr);
 
        if (pcnet32_debug & NETIF_MSG_PROBE) {
                pr_cont(" %pM", dev->dev_addr);
index 29a09271b64a39b71a46ac1d5beb5a6472160509..34c28aac767ff916fa9a10dac02786b74fac4fa9 100644 (file)
 #define MAC_HWF0R_TXCOESEL_WIDTH       1
 #define MAC_HWF0R_VLHASH_INDEX         4
 #define MAC_HWF0R_VLHASH_WIDTH         1
+#define MAC_HWF1R_ADDR64_INDEX         14
+#define MAC_HWF1R_ADDR64_WIDTH         2
 #define MAC_HWF1R_ADVTHWORD_INDEX      13
 #define MAC_HWF1R_ADVTHWORD_WIDTH      1
 #define MAC_HWF1R_DBGMEMA_INDEX                19
index 400757b49872704f6b236579efaf048ad461c2e9..80dd7a92f3574b5d09f04d077def087016c136e0 100644 (file)
@@ -1068,7 +1068,7 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
        rdesc->desc3 = 0;
 
        /* Make sure ownership is written to the descriptor */
-       wmb();
+       dma_wmb();
 }
 
 static void xgbe_tx_desc_init(struct xgbe_channel *channel)
@@ -1124,12 +1124,12 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
         * is written to the descriptor(s) before setting the OWN bit
         * for the descriptor
         */
-       wmb();
+       dma_wmb();
 
        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
 
        /* Make sure ownership is written to the descriptor */
-       wmb();
+       dma_wmb();
 }
 
 static void xgbe_rx_desc_init(struct xgbe_channel *channel)
@@ -1358,18 +1358,20 @@ static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring_data *rdata;
 
+       /* Make sure everything is written before the register write */
+       wmb();
+
        /* Issue a poll command to Tx DMA by writing address
         * of next immediate free descriptor */
        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));
 
-       /* Start the Tx coalescing timer */
+       /* Start the Tx timer */
        if (pdata->tx_usecs && !channel->tx_timer_active) {
                channel->tx_timer_active = 1;
-               hrtimer_start(&channel->tx_timer,
-                             ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
-                             HRTIMER_MODE_REL);
+               mod_timer(&channel->tx_timer,
+                         jiffies + usecs_to_jiffies(pdata->tx_usecs));
        }
 
        ring->tx.xmit_more = 0;
@@ -1565,7 +1567,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
         * is written to the descriptor(s) before setting the OWN bit
         * for the first descriptor
         */
-       wmb();
+       dma_wmb();
 
        /* Set OWN bit for the first descriptor */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
@@ -1577,7 +1579,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 #endif
 
        /* Make sure ownership is written to the descriptor */
-       wmb();
+       dma_wmb();
 
        ring->cur = cur_index + 1;
        if (!packet->skb->xmit_more ||
@@ -1613,7 +1615,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
                return 1;
 
        /* Make sure descriptor fields are read after reading the OWN bit */
-       rmb();
+       dma_rmb();
 
 #ifdef XGMAC_ENABLE_RX_DESC_DUMP
        xgbe_dump_rx_desc(ring, rdesc, ring->cur);
@@ -2004,7 +2006,8 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
 
-       netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
+       netdev_notice(pdata->netdev,
+                     "%d Tx hardware queues, %d byte fifo per queue\n",
                      pdata->tx_q_count, ((fifo_size + 1) * 256));
 }
 
@@ -2019,7 +2022,8 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
 
-       netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
+       netdev_notice(pdata->netdev,
+                     "%d Rx hardware queues, %d byte fifo per queue\n",
                      pdata->rx_q_count, ((fifo_size + 1) * 256));
 }
 
index 885b02b5be07f6732fc0540684cb7875aeec1140..347fe2419a18a0514b7ea3f29350a0c1e33deae4 100644 (file)
@@ -411,11 +411,9 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
+static void xgbe_tx_timer(unsigned long data)
 {
-       struct xgbe_channel *channel = container_of(timer,
-                                                   struct xgbe_channel,
-                                                   tx_timer);
+       struct xgbe_channel *channel = (struct xgbe_channel *)data;
        struct xgbe_prv_data *pdata = channel->pdata;
        struct napi_struct *napi;
 
@@ -437,8 +435,6 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
        channel->tx_timer_active = 0;
 
        DBGPR("<--xgbe_tx_timer\n");
-
-       return HRTIMER_NORESTART;
 }
 
 static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
@@ -454,9 +450,8 @@ static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
                        break;
 
                DBGPR("  %s adding tx timer\n", channel->name);
-               hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
-                            HRTIMER_MODE_REL);
-               channel->tx_timer.function = xgbe_tx_timer;
+               setup_timer(&channel->tx_timer, xgbe_tx_timer,
+                           (unsigned long)channel);
        }
 
        DBGPR("<--xgbe_init_tx_timers\n");
@@ -475,8 +470,7 @@ static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
                        break;
 
                DBGPR("  %s deleting tx timer\n", channel->name);
-               channel->tx_timer_active = 0;
-               hrtimer_cancel(&channel->tx_timer);
+               del_timer_sync(&channel->tx_timer);
        }
 
        DBGPR("<--xgbe_stop_tx_timers\n");
@@ -519,6 +513,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
                                                RXFIFOSIZE);
        hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                TXFIFOSIZE);
+       hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
        hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
        hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
        hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
@@ -553,6 +548,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
                break;
        }
 
+       /* Translate the address width setting into actual number */
+       switch (hw_feat->dma_width) {
+       case 0:
+               hw_feat->dma_width = 32;
+               break;
+       case 1:
+               hw_feat->dma_width = 40;
+               break;
+       case 2:
+               hw_feat->dma_width = 48;
+               break;
+       default:
+               hw_feat->dma_width = 32;
+       }
+
        /* The Queue, Channel and TC counts are zero based so increment them
         * to get the actual number
         */
@@ -692,6 +702,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
        DBGPR("-->xgbe_init_rx_coalesce\n");
 
        pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
+       pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
        pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
 
        hw_if->config_rx_coalesce(pdata);
@@ -1800,6 +1811,9 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
                ring->dirty++;
        }
 
+       /* Make sure everything is written before the register write */
+       wmb();
+
        /* Update the Rx Tail Pointer Register with address of
         * the last cleaned entry */
        rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
@@ -1807,16 +1821,15 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
                          lower_32_bits(rdata->rdesc_dma));
 }
 
-static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
                                       struct xgbe_ring_data *rdata,
                                       unsigned int *len)
 {
-       struct net_device *netdev = pdata->netdev;
        struct sk_buff *skb;
        u8 *packet;
        unsigned int copy_len;
 
-       skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
+       skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
        if (!skb)
                return NULL;
 
@@ -1863,7 +1876,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 
                /* Make sure descriptor fields are read after reading the OWN
                 * bit */
-               rmb();
+               dma_rmb();
 
 #ifdef XGMAC_ENABLE_TX_DESC_DUMP
                xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
@@ -1986,7 +1999,7 @@ read_again:
                                                        rdata->rx.hdr.dma_len,
                                                        DMA_FROM_DEVICE);
 
-                               skb = xgbe_create_skb(pdata, rdata, &put_len);
+                               skb = xgbe_create_skb(napi, rdata, &put_len);
                                if (!skb) {
                                        error = 1;
                                        goto skip_data;
index ebf489351555b19411d055a12c9812c08f73dd1c..b4f6eaaa08f0732211435c011e87e518c63cfbad 100644 (file)
@@ -291,7 +291,6 @@ static int xgbe_get_settings(struct net_device *netdev,
                return -ENODEV;
 
        ret = phy_ethtool_gset(pdata->phydev, cmd);
-       cmd->transceiver = XCVR_EXTERNAL;
 
        DBGPR("<--xgbe_get_settings\n");
 
@@ -378,18 +377,14 @@ static int xgbe_get_coalesce(struct net_device *netdev,
                             struct ethtool_coalesce *ec)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       unsigned int riwt;
 
        DBGPR("-->xgbe_get_coalesce\n");
 
        memset(ec, 0, sizeof(struct ethtool_coalesce));
 
-       riwt = pdata->rx_riwt;
-       ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
+       ec->rx_coalesce_usecs = pdata->rx_usecs;
        ec->rx_max_coalesced_frames = pdata->rx_frames;
 
-       ec->tx_coalesce_usecs = pdata->tx_usecs;
        ec->tx_max_coalesced_frames = pdata->tx_frames;
 
        DBGPR("<--xgbe_get_coalesce\n");
@@ -403,13 +398,14 @@ static int xgbe_set_coalesce(struct net_device *netdev,
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        unsigned int rx_frames, rx_riwt, rx_usecs;
-       unsigned int tx_frames, tx_usecs;
+       unsigned int tx_frames;
 
        DBGPR("-->xgbe_set_coalesce\n");
 
        /* Check for not supported parameters  */
        if ((ec->rx_coalesce_usecs_irq) ||
            (ec->rx_max_coalesced_frames_irq) ||
+           (ec->tx_coalesce_usecs) ||
            (ec->tx_coalesce_usecs_irq) ||
            (ec->tx_max_coalesced_frames_irq) ||
            (ec->stats_block_coalesce_usecs) ||
@@ -439,17 +435,17 @@ static int xgbe_set_coalesce(struct net_device *netdev,
        }
 
        rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
+       rx_usecs = ec->rx_coalesce_usecs;
        rx_frames = ec->rx_max_coalesced_frames;
 
        /* Use smallest possible value if conversion resulted in zero */
-       if (ec->rx_coalesce_usecs && !rx_riwt)
+       if (rx_usecs && !rx_riwt)
                rx_riwt = 1;
 
        /* Check the bounds of values for Rx */
        if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
-               rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
                netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
-                            rx_usecs);
+                            hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
                return -EINVAL;
        }
        if (rx_frames > pdata->rx_desc_count) {
@@ -458,7 +454,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
                return -EINVAL;
        }
 
-       tx_usecs = ec->tx_coalesce_usecs;
        tx_frames = ec->tx_max_coalesced_frames;
 
        /* Check the bounds of values for Tx */
@@ -469,10 +464,10 @@ static int xgbe_set_coalesce(struct net_device *netdev,
        }
 
        pdata->rx_riwt = rx_riwt;
+       pdata->rx_usecs = rx_usecs;
        pdata->rx_frames = rx_frames;
        hw_if->config_rx_coalesce(pdata);
 
-       pdata->tx_usecs = tx_usecs;
        pdata->tx_frames = tx_frames;
        hw_if->config_tx_coalesce(pdata);
 
index 32dd6513705117cdc434800fc68b6c52e750ed78..2e4c22d94a6bef50e0606b9fce861c29f88c24d4 100644 (file)
@@ -374,15 +374,6 @@ static int xgbe_probe(struct platform_device *pdev)
                pdata->awcache = XGBE_DMA_SYS_AWCACHE;
        }
 
-       /* Set the DMA mask */
-       if (!dev->dma_mask)
-               dev->dma_mask = &dev->coherent_dma_mask;
-       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
-       if (ret) {
-               dev_err(dev, "dma_set_mask_and_coherent failed\n");
-               goto err_io;
-       }
-
        /* Get the device interrupt */
        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
@@ -409,6 +400,16 @@ static int xgbe_probe(struct platform_device *pdev)
        /* Set default configuration data */
        xgbe_default_config(pdata);
 
+       /* Set the DMA mask */
+       if (!dev->dma_mask)
+               dev->dma_mask = &dev->coherent_dma_mask;
+       ret = dma_set_mask_and_coherent(dev,
+                                       DMA_BIT_MASK(pdata->hw_feat.dma_width));
+       if (ret) {
+               dev_err(dev, "dma_set_mask_and_coherent failed\n");
+               goto err_io;
+       }
+
        /* Calculate the number of Tx and Rx rings to be created
         *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
         *   the number of Tx queues to the number of Tx channels
index f326178ef3760122f034a27d5c5afeda4eb9aa70..b03e4f58d02ea550b4a19d7e4f7113ec3b48e4e0 100644 (file)
@@ -179,7 +179,7 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
        return 0;
 }
 
-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
+static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
 {
        struct xgbe_prv_data *pdata = container_of(info,
                                                   struct xgbe_prv_data,
@@ -193,12 +193,13 @@ static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
 
        spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
 
-       *ts = ns_to_timespec(nsec);
+       *ts = ns_to_timespec64(nsec);
 
        return 0;
 }
 
-static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
+static int xgbe_settime(struct ptp_clock_info *info,
+                       const struct timespec64 *ts)
 {
        struct xgbe_prv_data *pdata = container_of(info,
                                                   struct xgbe_prv_data,
@@ -206,7 +207,7 @@ static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
        unsigned long flags;
        u64 nsec;
 
-       nsec = timespec_to_ns(ts);
+       nsec = timespec64_to_ns(ts);
 
        spin_lock_irqsave(&pdata->tstamp_lock, flags);
 
@@ -236,8 +237,8 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
        info->max_adj = pdata->ptpclk_rate;
        info->adjfreq = xgbe_adjfreq;
        info->adjtime = xgbe_adjtime;
-       info->gettime = xgbe_gettime;
-       info->settime = xgbe_settime;
+       info->gettime64 = xgbe_gettime;
+       info->settime64 = xgbe_settime;
        info->enable = xgbe_enable;
 
        clock = ptp_clock_register(info, pdata->dev);
index 13e8f95c077c3c57089e7b7f33c1577efc7143cf..dd742426eb0425e9c4a7b041ee4cf3764694515e 100644 (file)
         ((_idx) & ((_ring)->rdesc_count - 1)))
 
 /* Default coalescing parameters */
-#define XGMAC_INIT_DMA_TX_USECS                50
+#define XGMAC_INIT_DMA_TX_USECS                1000
 #define XGMAC_INIT_DMA_TX_FRAMES       25
 
 #define XGMAC_MAX_DMA_RIWT             0xff
@@ -410,7 +410,7 @@ struct xgbe_channel {
        unsigned int saved_ier;
 
        unsigned int tx_timer_active;
-       struct hrtimer tx_timer;
+       struct timer_list tx_timer;
 
        struct xgbe_ring *tx_ring;
        struct xgbe_ring *rx_ring;
@@ -620,7 +620,7 @@ struct xgbe_hw_features {
        unsigned int mgk;               /* PMT magic packet */
        unsigned int mmc;               /* RMON module */
        unsigned int aoe;               /* ARP Offload */
-       unsigned int ts;                /* IEEE 1588-2008 Adavanced Timestamp */
+       unsigned int ts;                /* IEEE 1588-2008 Advanced Timestamp */
        unsigned int eee;               /* Energy Efficient Ethernet */
        unsigned int tx_coe;            /* Tx Checksum Offload */
        unsigned int rx_coe;            /* Rx Checksum Offload */
@@ -632,6 +632,7 @@ struct xgbe_hw_features {
        unsigned int rx_fifo_size;      /* MTL Receive FIFO Size */
        unsigned int tx_fifo_size;      /* MTL Transmit FIFO Size */
        unsigned int adv_ts_hi;         /* Advance Timestamping High Word */
+       unsigned int dma_width;         /* DMA width */
        unsigned int dcb;               /* DCB Feature */
        unsigned int sph;               /* Split Header Feature */
        unsigned int tso;               /* TCP Segmentation Offload */
@@ -715,6 +716,7 @@ struct xgbe_prv_data {
 
        /* Rx coalescing settings */
        unsigned int rx_riwt;
+       unsigned int rx_usecs;
        unsigned int rx_frames;
 
        /* Current Rx buffer size */
index ec45f3256f0e3da2928c8be98ba9abcc0d58fb27..d9bc89d69266cfd75a644888ffcb373d0ce34b77 100644 (file)
@@ -97,6 +97,8 @@ enum xgene_enet_rm {
 #define QCOHERENT              BIT(4)
 #define RECOMBBUF              BIT(27)
 
+#define MAC_OFFSET                     0x30
+
 #define BLOCK_ETH_CSR_OFFSET           0x2000
 #define BLOCK_ETH_RING_IF_OFFSET       0x9000
 #define BLOCK_ETH_DIAG_CSR_OFFSET      0xD000
index 635a83be7e5ec5bec5670ceebca692abd9257260..40d3530d7f30966af178eb5890b2172cd3197b49 100644 (file)
@@ -428,13 +428,23 @@ static int xgene_enet_register_irq(struct net_device *ndev)
 {
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
+       struct xgene_enet_desc_ring *ring;
        int ret;
 
-       ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq,
-                              IRQF_SHARED, ndev->name, pdata->rx_ring);
-       if (ret) {
-               netdev_err(ndev, "rx%d interrupt request failed\n",
-                          pdata->rx_ring->irq);
+       ring = pdata->rx_ring;
+       ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
+                              IRQF_SHARED, ring->irq_name, ring);
+       if (ret)
+               netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);
+
+       if (pdata->cq_cnt) {
+               ring = pdata->tx_ring->cp_ring;
+               ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
+                                      IRQF_SHARED, ring->irq_name, ring);
+               if (ret) {
+                       netdev_err(ndev, "Failed to request irq %s\n",
+                                  ring->irq_name);
+               }
        }
 
        return ret;
@@ -448,6 +458,37 @@ static void xgene_enet_free_irq(struct net_device *ndev)
        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(ndev);
        devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
+
+       if (pdata->cq_cnt) {
+               devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
+                             pdata->tx_ring->cp_ring);
+       }
+}
+
+static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
+{
+       struct napi_struct *napi;
+
+       napi = &pdata->rx_ring->napi;
+       napi_enable(napi);
+
+       if (pdata->cq_cnt) {
+               napi = &pdata->tx_ring->cp_ring->napi;
+               napi_enable(napi);
+       }
+}
+
+static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
+{
+       struct napi_struct *napi;
+
+       napi = &pdata->rx_ring->napi;
+       napi_disable(napi);
+
+       if (pdata->cq_cnt) {
+               napi = &pdata->tx_ring->cp_ring->napi;
+               napi_disable(napi);
+       }
 }
 
 static int xgene_enet_open(struct net_device *ndev)
@@ -462,7 +503,7 @@ static int xgene_enet_open(struct net_device *ndev)
        ret = xgene_enet_register_irq(ndev);
        if (ret)
                return ret;
-       napi_enable(&pdata->rx_ring->napi);
+       xgene_enet_napi_enable(pdata);
 
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                phy_start(pdata->phy_dev);
@@ -486,7 +527,7 @@ static int xgene_enet_close(struct net_device *ndev)
        else
                cancel_delayed_work_sync(&pdata->link_work);
 
-       napi_disable(&pdata->rx_ring->napi);
+       xgene_enet_napi_disable(pdata);
        xgene_enet_free_irq(ndev);
        xgene_enet_process_ring(pdata->rx_ring, -1);
 
@@ -580,6 +621,8 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
        if (ring) {
                if (ring->cp_ring && ring->cp_ring->cp_skb)
                        devm_kfree(dev, ring->cp_ring->cp_skb);
+               if (ring->cp_ring && pdata->cq_cnt)
+                       xgene_enet_free_desc_ring(ring->cp_ring);
                xgene_enet_free_desc_ring(ring);
        }
 
@@ -645,9 +688,11 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
        struct xgene_enet_desc_ring *buf_pool = NULL;
-       u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
-       u8 bp_bufnum = START_BP_BUFNUM;
-       u16 ring_id, ring_num = START_RING_NUM;
+       u8 cpu_bufnum = pdata->cpu_bufnum;
+       u8 eth_bufnum = pdata->eth_bufnum;
+       u8 bp_bufnum = pdata->bp_bufnum;
+       u16 ring_num = pdata->ring_num;
+       u16 ring_id;
        int ret;
 
        /* allocate rx descriptor ring */
@@ -671,6 +716,12 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
        rx_ring->nbufpool = NUM_BUFPOOL;
        rx_ring->buf_pool = buf_pool;
        rx_ring->irq = pdata->rx_irq;
+       if (!pdata->cq_cnt) {
+               snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
+                        ndev->name);
+       } else {
+               snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
+       }
        buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
                                        sizeof(struct sk_buff *), GFP_KERNEL);
        if (!buf_pool->rx_skb) {
@@ -692,7 +743,22 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
        }
        pdata->tx_ring = tx_ring;
 
-       cp_ring = pdata->rx_ring;
+       if (!pdata->cq_cnt) {
+               cp_ring = pdata->rx_ring;
+       } else {
+               /* allocate tx completion descriptor ring */
+               ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
+               cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+                                                     RING_CFGSIZE_16KB,
+                                                     ring_id);
+               if (!cp_ring) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+               cp_ring->irq = pdata->txc_irq;
+               snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
+       }
+
        cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
                                       sizeof(struct sk_buff *), GFP_KERNEL);
        if (!cp_ring->cp_skb) {
@@ -752,6 +818,22 @@ static const struct net_device_ops xgene_ndev_ops = {
        .ndo_set_mac_address = xgene_enet_set_mac_address,
 };
 
+static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata)
+{
+       u32 id = 0;
+       int ret;
+
+       ret = device_property_read_u32(dev, "port-id", &id);
+       if (!ret && id > 1) {
+               dev_err(dev, "Incorrect port-id specified\n");
+               return -ENODEV;
+       }
+
+       pdata->port_id = id;
+
+       return 0;
+}
+
 static int xgene_get_mac_address(struct device *dev,
                                 unsigned char *addr)
 {
@@ -835,13 +917,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
                return -ENOMEM;
        }
 
-       ret = platform_get_irq(pdev, 0);
-       if (ret <= 0) {
-               dev_err(dev, "Unable to get ENET Rx IRQ\n");
-               ret = ret ? : -ENXIO;
+       ret = xgene_get_port_id(dev, pdata);
+       if (ret)
                return ret;
-       }
-       pdata->rx_irq = ret;
 
        if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
                eth_hw_addr_random(ndev);
@@ -860,19 +938,37 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
                return -ENODEV;
        }
 
+       ret = platform_get_irq(pdev, 0);
+       if (ret <= 0) {
+               dev_err(dev, "Unable to get ENET Rx IRQ\n");
+               ret = ret ? : -ENXIO;
+               return ret;
+       }
+       pdata->rx_irq = ret;
+
+       if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
+               ret = platform_get_irq(pdev, 1);
+               if (ret <= 0) {
+                       dev_err(dev, "Unable to get ENET Tx completion IRQ\n");
+                       ret = ret ? : -ENXIO;
+                       return ret;
+               }
+               pdata->txc_irq = ret;
+       }
+
        pdata->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pdata->clk)) {
                /* Firmware may have set up the clock already. */
                pdata->clk = NULL;
        }
 
-       base_addr = pdata->base_addr;
+       base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
        pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
        pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
        pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
            pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
-               pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
+               pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
        } else {
                pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
@@ -928,13 +1024,60 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
                pdata->mac_ops = &xgene_sgmac_ops;
                pdata->port_ops = &xgene_sgport_ops;
                pdata->rm = RM1;
+               pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
                break;
        default:
                pdata->mac_ops = &xgene_xgmac_ops;
                pdata->port_ops = &xgene_xgport_ops;
                pdata->rm = RM0;
+               pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
                break;
        }
+
+       switch (pdata->port_id) {
+       case 0:
+               pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+               pdata->eth_bufnum = START_ETH_BUFNUM_0;
+               pdata->bp_bufnum = START_BP_BUFNUM_0;
+               pdata->ring_num = START_RING_NUM_0;
+               break;
+       case 1:
+               pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+               pdata->eth_bufnum = START_ETH_BUFNUM_1;
+               pdata->bp_bufnum = START_BP_BUFNUM_1;
+               pdata->ring_num = START_RING_NUM_1;
+               break;
+       default:
+               break;
+       }
+
+}
+
+static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
+{
+       struct napi_struct *napi;
+
+       napi = &pdata->rx_ring->napi;
+       netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
+
+       if (pdata->cq_cnt) {
+               napi = &pdata->tx_ring->cp_ring->napi;
+               netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
+                              NAPI_POLL_WEIGHT);
+       }
+}
+
+static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
+{
+       struct napi_struct *napi;
+
+       napi = &pdata->rx_ring->napi;
+       netif_napi_del(napi);
+
+       if (pdata->cq_cnt) {
+               napi = &pdata->tx_ring->cp_ring->napi;
+               netif_napi_del(napi);
+       }
 }
 
 static int xgene_enet_probe(struct platform_device *pdev)
@@ -942,7 +1085,6 @@ static int xgene_enet_probe(struct platform_device *pdev)
        struct net_device *ndev;
        struct xgene_enet_pdata *pdata;
        struct device *dev = &pdev->dev;
-       struct napi_struct *napi;
        struct xgene_mac_ops *mac_ops;
        int ret;
 
@@ -984,8 +1126,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
        if (ret)
                goto err;
 
-       napi = &pdata->rx_ring->napi;
-       netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
+       xgene_enet_napi_add(pdata);
        mac_ops = pdata->mac_ops;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                ret = xgene_enet_mdio_config(pdata);
@@ -1012,7 +1153,7 @@ static int xgene_enet_remove(struct platform_device *pdev)
        mac_ops->rx_disable(pdata);
        mac_ops->tx_disable(pdata);
 
-       netif_napi_del(&pdata->rx_ring->napi);
+       xgene_enet_napi_del(pdata);
        xgene_enet_mdio_remove(pdata);
        xgene_enet_delete_desc_rings(pdata);
        unregister_netdev(ndev);
@@ -1033,7 +1174,7 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
 #endif
 
 #ifdef CONFIG_OF
-static struct of_device_id xgene_enet_of_match[] = {
+static const struct of_device_id xgene_enet_of_match[] = {
        {.compatible = "apm,xgene-enet",},
        {.compatible = "apm,xgene1-sgenet",},
        {.compatible = "apm,xgene1-xgenet",},
index c2d465c3db66b15eaf7ddf313106605a997fb170..8f3d232b09bc8c5d0f321fc398aea52c498f22cc 100644 (file)
 #define SKB_BUFFER_SIZE                (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
 #define NUM_PKT_BUF    64
 #define NUM_BUFPOOL    32
-#define START_ETH_BUFNUM       2
-#define START_BP_BUFNUM                0x22
-#define START_RING_NUM         8
+
+#define START_CPU_BUFNUM_0     0
+#define START_ETH_BUFNUM_0     2
+#define START_BP_BUFNUM_0      0x22
+#define START_RING_NUM_0       8
+#define START_CPU_BUFNUM_1     12
+#define START_ETH_BUFNUM_1     10
+#define START_BP_BUFNUM_1      0x2A
+#define START_RING_NUM_1       264
+
+#define IRQ_ID_SIZE            16
+#define XGENE_MAX_TXC_RINGS    1
 
 #define PHY_POLL_LINK_ON       (10 * HZ)
 #define PHY_POLL_LINK_OFF      (PHY_POLL_LINK_ON / 5)
@@ -57,6 +66,7 @@ struct xgene_enet_desc_ring {
        u16 tail;
        u16 slots;
        u16 irq;
+       char irq_name[IRQ_ID_SIZE];
        u32 size;
        u32 state[NUM_RING_CONFIG];
        void __iomem *cmd_base;
@@ -111,6 +121,8 @@ struct xgene_enet_pdata {
        u32 cp_qcnt_hi;
        u32 cp_qcnt_low;
        u32 rx_irq;
+       u32 txc_irq;
+       u8 cq_cnt;
        void __iomem *eth_csr_addr;
        void __iomem *eth_ring_if_addr;
        void __iomem *eth_diag_csr_addr;
@@ -125,6 +137,11 @@ struct xgene_enet_pdata {
        struct xgene_mac_ops *mac_ops;
        struct xgene_port_ops *port_ops;
        struct delayed_work link_work;
+       u32 port_id;
+       u8 cpu_bufnum;
+       u8 eth_bufnum;
+       u8 bp_bufnum;
+       u16 ring_num;
 };
 
 struct xgene_indirect_ctl {
index f5d4f68c288c395076205ba128788797d7114cdf..f27fb6f2a93b90864bf072cc433a56a2f403d175 100644 (file)
@@ -226,6 +226,7 @@ static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
 static void xgene_sgmac_init(struct xgene_enet_pdata *p)
 {
        u32 data, loop = 10;
+       u32 offset = p->port_id * 4;
 
        xgene_sgmac_reset(p);
 
@@ -272,9 +273,9 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
        xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);
 
        /* Bypass traffic gating */
-       xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
+       xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0);
        xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
-       xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR, RESUME_RX0);
+       xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0);
 }
 
 static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -330,13 +331,14 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
                                  u32 dst_ring_num, u16 bufpool_id)
 {
        u32 data, fpsel;
+       u32 offset = p->port_id * MAC_OFFSET;
 
        data = CFG_CLE_BYPASS_EN0;
-       xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR, data);
+       xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data);
 
        fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
        data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
-       xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR, data);
+       xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data);
 }
 
 static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
index daae0e01625360598194c76fcc4239a3283bc85c..2f98846e2d898c53f4955a318f0f9b623aa531f8 100644 (file)
@@ -1621,7 +1621,7 @@ static int bmac_remove(struct macio_dev *mdev)
        return 0;
 }
 
-static struct of_device_id bmac_match[] =
+static const struct of_device_id bmac_match[] =
 {
        {
        .name           = "bmac",
index 842fe7684904351652f669d7838966597e467bcc..a18948286682527988a49b09f8c3c569071ec77b 100644 (file)
@@ -720,7 +720,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
            mace_reset(dev);
                /*
                 * XXX mace likes to hang the machine after a xmtfs error.
-                * This is hard to reproduce, reseting *may* help
+                * This is hard to reproduce, resetting *may* help
                 */
        }
        cp = mp->tx_cmds + NCMDS_TX * i;
@@ -984,7 +984,7 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
     return IRQ_HANDLED;
 }
 
-static struct of_device_id mace_match[] =
+static const struct of_device_id mace_match[] =
 {
        {
        .name           = "mace",
index 6e66127e6abf5a5a17a877ba315b058e25bdd865..89914ca17a490799b3fa9ec9f54ab87c7cd85a80 100644 (file)
@@ -575,7 +575,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
                        mace_reset(dev);
                        /*
                         * XXX mace likes to hang the machine after a xmtfs error.
-                        * This is hard to reproduce, reseting *may* help
+                        * This is hard to reproduce, resetting *may* help
                         */
                }
                /* dma should have finished */
index 52fdfe22597807dcde7034b8b9b5a2d84080ec42..a8b80c56ac25e3b59ab28a052f99c4e46ef5c27e 100644 (file)
@@ -307,7 +307,7 @@ void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel)
 
 /*
  * atl1c_read_phy_core
- * core funtion to read register in PHY via MDIO control regsiter.
+ * core function to read register in PHY via MDIO control regsiter.
  * ext: extension register (see IEEE 802.3)
  * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
  * reg: reg to read
@@ -356,7 +356,7 @@ int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
 
 /*
  * atl1c_write_phy_core
- * core funtion to write to register in PHY via MDIO control regsiter.
+ * core function to write to register in PHY via MDIO control register.
  * ext: extension register (see IEEE 802.3)
  * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
  * reg: reg to write
index 587f63e87588f73a3e310066f3fd7494d1259e3f..932bd1862f7adeb7a95cec8ae1efda76edb7a728 100644 (file)
@@ -752,7 +752,7 @@ static void atl1c_patch_assign(struct atl1c_hw *hw)
 
        if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 &&
            hw->revision_id == L2CB_V21) {
-               /* config acess mode */
+               /* config access mode */
                pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
                                       REG_PCIE_DEV_MISC_CTRL);
                pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl);
index 41a3c9804427b0931569c914d3cfc4f1418ee332..a6f9142b9048cecefbfebc886b5585b7a7617dfb 100644 (file)
@@ -71,12 +71,12 @@ config BCMGENET
          Broadcom BCM7xxx Set Top Box family chipset.
 
 config BNX2
-       tristate "QLogic NetXtremeII support"
+       tristate "QLogic bnx2 support"
        depends on PCI
        select CRC32
        select FW_LOADER
        ---help---
-         This driver supports QLogic NetXtremeII gigabit Ethernet cards.
+         This driver supports QLogic bnx2 gigabit Ethernet cards.
 
          To compile this driver as a module, choose M here: the module
          will be called bnx2.  This is recommended.
@@ -87,8 +87,8 @@ config CNIC
        select BNX2
        select UIO
        ---help---
-         This driver supports offload features of QLogic NetXtremeII
-         gigabit Ethernet cards.
+         This driver supports offload features of QLogic bnx2 gigabit
+         Ethernet cards.
 
          To compile this driver as a module, choose M here: the module
          will be called cnic.  This is recommended.
@@ -142,7 +142,7 @@ config BNX2X_SRIOV
 
 config BGMAC
        tristate "BCMA bus GBit core support"
-       depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
+       depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
        select PHYLIB
        ---help---
          This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
index 0469f72c6e7e8e01147446a528d5771a147b5cc6..fa8f9e147c34b6ed2c49e4c7679cfb9bcac34298 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/etherdevice.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <bcm47xx_nvram.h>
@@ -114,53 +115,91 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac,
        bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
 }
 
+static void
+bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
+                    int i, int len, u32 ctl0)
+{
+       struct bgmac_slot_info *slot;
+       struct bgmac_dma_desc *dma_desc;
+       u32 ctl1;
+
+       if (i == ring->num_slots - 1)
+               ctl0 |= BGMAC_DESC_CTL0_EOT;
+
+       ctl1 = len & BGMAC_DESC_CTL1_LEN;
+
+       slot = &ring->slots[i];
+       dma_desc = &ring->cpu_base[i];
+       dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
+       dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
+       dma_desc->ctl0 = cpu_to_le32(ctl0);
+       dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
+
 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
                                    struct bgmac_dma_ring *ring,
                                    struct sk_buff *skb)
 {
        struct device *dma_dev = bgmac->core->dma_dev;
        struct net_device *net_dev = bgmac->net_dev;
-       struct bgmac_dma_desc *dma_desc;
-       struct bgmac_slot_info *slot;
-       u32 ctl0, ctl1;
+       struct bgmac_slot_info *slot = &ring->slots[ring->end];
        int free_slots;
+       int nr_frags;
+       u32 flags;
+       int index = ring->end;
+       int i;
 
        if (skb->len > BGMAC_DESC_CTL1_LEN) {
                bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
-               goto err_stop_drop;
+               goto err_drop;
        }
 
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb_checksum_help(skb);
+
+       nr_frags = skb_shinfo(skb)->nr_frags;
+
        if (ring->start <= ring->end)
                free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
        else
                free_slots = ring->start - ring->end;
-       if (free_slots == 1) {
+
+       if (free_slots <= nr_frags + 1) {
                bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
                netif_stop_queue(net_dev);
                return NETDEV_TX_BUSY;
        }
 
-       slot = &ring->slots[ring->end];
-       slot->skb = skb;
-       slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
+       slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
                                        DMA_TO_DEVICE);
-       if (dma_mapping_error(dma_dev, slot->dma_addr)) {
-               bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
-                         ring->mmio_base);
-               goto err_stop_drop;
-       }
+       if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
+               goto err_dma_head;
 
-       ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
-       if (ring->end == ring->num_slots - 1)
-               ctl0 |= BGMAC_DESC_CTL0_EOT;
-       ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;
+       flags = BGMAC_DESC_CTL0_SOF;
+       if (!nr_frags)
+               flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
 
-       dma_desc = ring->cpu_base;
-       dma_desc += ring->end;
-       dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
-       dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
-       dma_desc->ctl0 = cpu_to_le32(ctl0);
-       dma_desc->ctl1 = cpu_to_le32(ctl1);
+       bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
+       flags = 0;
+
+       for (i = 0; i < nr_frags; i++) {
+               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               int len = skb_frag_size(frag);
+
+               index = (index + 1) % BGMAC_TX_RING_SLOTS;
+               slot = &ring->slots[index];
+               slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
+                                                 len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
+                       goto err_dma;
+
+               if (i == nr_frags - 1)
+                       flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
+
+               bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
+       }
+
+       slot->skb = skb;
 
        netdev_sent_queue(net_dev, skb->len);
 
@@ -169,20 +208,35 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
        /* Increase ring->end to point empty slot. We tell hardware the first
         * slot it should *not* read.
         */
-       if (++ring->end >= BGMAC_TX_RING_SLOTS)
-               ring->end = 0;
+       ring->end = (index + 1) % BGMAC_TX_RING_SLOTS;
        bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
                    ring->index_base +
                    ring->end * sizeof(struct bgmac_dma_desc));
 
-       /* Always keep one slot free to allow detecting bugged calls. */
-       if (--free_slots == 1)
+       free_slots -= nr_frags + 1;
+       if (free_slots < 8)
                netif_stop_queue(net_dev);
 
        return NETDEV_TX_OK;
 
-err_stop_drop:
-       netif_stop_queue(net_dev);
+err_dma:
+       dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
+                        DMA_TO_DEVICE);
+
+       while (i > 0) {
+               int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
+               struct bgmac_slot_info *slot = &ring->slots[index];
+               u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
+               int len = ctl1 & BGMAC_DESC_CTL1_LEN;
+
+               dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
+       }
+
+err_dma_head:
+       bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
+                 ring->mmio_base);
+
+err_drop:
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
 }
@@ -204,32 +258,45 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 
        while (ring->start != empty_slot) {
                struct bgmac_slot_info *slot = &ring->slots[ring->start];
+               u32 ctl1 = le32_to_cpu(ring->cpu_base[ring->start].ctl1);
+               int len = ctl1 & BGMAC_DESC_CTL1_LEN;
 
-               if (slot->skb) {
+               if (!slot->dma_addr) {
+                       bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
+                                 ring->start, ring->end);
+                       goto next;
+               }
+
+               if (ctl1 & BGMAC_DESC_CTL0_SOF)
                        /* Unmap no longer used buffer */
-                       dma_unmap_single(dma_dev, slot->dma_addr,
-                                        slot->skb->len, DMA_TO_DEVICE);
-                       slot->dma_addr = 0;
+                       dma_unmap_single(dma_dev, slot->dma_addr, len,
+                                        DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(dma_dev, slot->dma_addr, len,
+                                      DMA_TO_DEVICE);
 
+               if (slot->skb) {
                        bytes_compl += slot->skb->len;
                        pkts_compl++;
 
                        /* Free memory! :) */
                        dev_kfree_skb(slot->skb);
                        slot->skb = NULL;
-               } else {
-                       bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
-                                 ring->start, ring->end);
                }
 
+next:
+               slot->dma_addr = 0;
                if (++ring->start >= BGMAC_TX_RING_SLOTS)
                        ring->start = 0;
                freed = true;
        }
 
+       if (!pkts_compl)
+               return;
+
        netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
 
-       if (freed && netif_queue_stopped(bgmac->net_dev))
+       if (netif_queue_stopped(bgmac->net_dev))
                netif_wake_queue(bgmac->net_dev);
 }
 
@@ -275,31 +342,31 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
                                     struct bgmac_slot_info *slot)
 {
        struct device *dma_dev = bgmac->core->dma_dev;
-       struct sk_buff *skb;
        dma_addr_t dma_addr;
        struct bgmac_rx_header *rx;
+       void *buf;
 
        /* Alloc skb */
-       skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
-       if (!skb)
+       buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
+       if (!buf)
                return -ENOMEM;
 
        /* Poison - if everything goes fine, hardware will overwrite it */
-       rx = (struct bgmac_rx_header *)skb->data;
+       rx = buf;
        rx->len = cpu_to_le16(0xdead);
        rx->flags = cpu_to_le16(0xbeef);
 
        /* Map skb for the DMA */
-       dma_addr = dma_map_single(dma_dev, skb->data,
-                                 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+       dma_addr = dma_map_single(dma_dev, buf, BGMAC_RX_BUF_SIZE,
+                                 DMA_FROM_DEVICE);
        if (dma_mapping_error(dma_dev, dma_addr)) {
                bgmac_err(bgmac, "DMA mapping error\n");
-               dev_kfree_skb(skb);
+               put_page(virt_to_head_page(buf));
                return -ENOMEM;
        }
 
        /* Update the slot */
-       slot->skb = skb;
+       slot->buf = buf;
        slot->dma_addr = dma_addr;
 
        return 0;
@@ -342,8 +409,9 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
        while (ring->start != ring->end) {
                struct device *dma_dev = bgmac->core->dma_dev;
                struct bgmac_slot_info *slot = &ring->slots[ring->start];
-               struct sk_buff *skb = slot->skb;
-               struct bgmac_rx_header *rx;
+               struct bgmac_rx_header *rx = slot->buf;
+               struct sk_buff *skb;
+               void *buf = slot->buf;
                u16 len, flags;
 
                /* Unmap buffer to make it accessible to the CPU */
@@ -351,7 +419,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
                                        BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
 
                /* Get info from the header */
-               rx = (struct bgmac_rx_header *)skb->data;
                len = le16_to_cpu(rx->len);
                flags = le16_to_cpu(rx->flags);
 
@@ -392,12 +459,13 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
                        dma_unmap_single(dma_dev, old_dma_addr,
                                         BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
 
+                       skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
                        skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
                        skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
 
                        skb_checksum_none_assert(skb);
                        skb->protocol = eth_type_trans(skb, bgmac->net_dev);
-                       netif_receive_skb(skb);
+                       napi_gro_receive(&bgmac->napi, skb);
                        handled++;
                } while (0);
 
@@ -433,40 +501,79 @@ static bool bgmac_dma_unaligned(struct bgmac *bgmac,
        return false;
 }
 
-static void bgmac_dma_ring_free(struct bgmac *bgmac,
-                               struct bgmac_dma_ring *ring)
+static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
+                                  struct bgmac_dma_ring *ring)
 {
        struct device *dma_dev = bgmac->core->dma_dev;
+       struct bgmac_dma_desc *dma_desc = ring->cpu_base;
        struct bgmac_slot_info *slot;
-       int size;
        int i;
 
        for (i = 0; i < ring->num_slots; i++) {
+               int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
+
                slot = &ring->slots[i];
-               if (slot->skb) {
-                       if (slot->dma_addr)
-                               dma_unmap_single(dma_dev, slot->dma_addr,
-                                                slot->skb->len, DMA_TO_DEVICE);
-                       dev_kfree_skb(slot->skb);
-               }
+               dev_kfree_skb(slot->skb);
+
+               if (!slot->dma_addr)
+                       continue;
+
+               if (slot->skb)
+                       dma_unmap_single(dma_dev, slot->dma_addr,
+                                        len, DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(dma_dev, slot->dma_addr,
+                                      len, DMA_TO_DEVICE);
        }
+}
 
-       if (ring->cpu_base) {
-               /* Free ring of descriptors */
-               size = ring->num_slots * sizeof(struct bgmac_dma_desc);
-               dma_free_coherent(dma_dev, size, ring->cpu_base,
-                                 ring->dma_base);
+static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
+                                  struct bgmac_dma_ring *ring)
+{
+       struct device *dma_dev = bgmac->core->dma_dev;
+       struct bgmac_slot_info *slot;
+       int i;
+
+       for (i = 0; i < ring->num_slots; i++) {
+               slot = &ring->slots[i];
+               if (!slot->buf)
+                       continue;
+
+               if (slot->dma_addr)
+                       dma_unmap_single(dma_dev, slot->dma_addr,
+                                        BGMAC_RX_BUF_SIZE,
+                                        DMA_FROM_DEVICE);
+               put_page(virt_to_head_page(slot->buf));
        }
 }
 
+static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
+                                    struct bgmac_dma_ring *ring)
+{
+       struct device *dma_dev = bgmac->core->dma_dev;
+       int size;
+
+       if (!ring->cpu_base)
+           return;
+
+       /* Free ring of descriptors */
+       size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+       dma_free_coherent(dma_dev, size, ring->cpu_base,
+                         ring->dma_base);
+}
+
 static void bgmac_dma_free(struct bgmac *bgmac)
 {
        int i;
 
-       for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
-               bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
-       for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
-               bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
+       for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+               bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);
+               bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i]);
+       }
+       for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+               bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
+               bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i]);
+       }
 }
 
 static int bgmac_dma_alloc(struct bgmac *bgmac)
@@ -1330,13 +1437,46 @@ static void bgmac_adjust_link(struct net_device *net_dev)
        }
 }
 
+static int bgmac_fixed_phy_register(struct bgmac *bgmac)
+{
+       struct fixed_phy_status fphy_status = {
+               .link = 1,
+               .speed = SPEED_1000,
+               .duplex = DUPLEX_FULL,
+       };
+       struct phy_device *phy_dev;
+       int err;
+
+       phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+       if (!phy_dev || IS_ERR(phy_dev)) {
+               bgmac_err(bgmac, "Failed to register fixed PHY device\n");
+               return -ENODEV;
+       }
+
+       err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
+                                PHY_INTERFACE_MODE_MII);
+       if (err) {
+               bgmac_err(bgmac, "Connecting PHY failed\n");
+               return err;
+       }
+
+       bgmac->phy_dev = phy_dev;
+
+       return err;
+}
+
 static int bgmac_mii_register(struct bgmac *bgmac)
 {
+       struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
        char bus_id[MII_BUS_ID_SIZE + 3];
        int i, err = 0;
 
+       if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+           ci->id == BCMA_CHIP_ID_BCM53018)
+               return bgmac_fixed_phy_register(bgmac);
+
        mii_bus = mdiobus_alloc();
        if (!mii_bus)
                return -ENOMEM;
@@ -1517,6 +1657,10 @@ static int bgmac_probe(struct bcma_device *core)
                goto err_dma_free;
        }
 
+       net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       net_dev->hw_features = net_dev->features;
+       net_dev->vlan_features = net_dev->features;
+
        err = register_netdev(bgmac->net_dev);
        if (err) {
                bgmac_err(bgmac, "Cannot register net device\n");
index 89fa5bc69c515f3fd3a4c309558294448fceb386..3ad965fe7fcc8f7e998eafcbe418aa65f171b760 100644 (file)
 
 #define BGMAC_DESC_CTL0_EOT                    0x10000000      /* End of ring */
 #define BGMAC_DESC_CTL0_IOC                    0x20000000      /* IRQ on complete */
-#define BGMAC_DESC_CTL0_SOF                    0x40000000      /* Start of frame */
-#define BGMAC_DESC_CTL0_EOF                    0x80000000      /* End of frame */
+#define BGMAC_DESC_CTL0_EOF                    0x40000000      /* End of frame */
+#define BGMAC_DESC_CTL0_SOF                    0x80000000      /* Start of frame */
 #define BGMAC_DESC_CTL1_LEN                    0x00001FFF
 
 #define BGMAC_PHY_NOREGS                       0x1E
 #define BGMAC_RX_FRAME_OFFSET                  30              /* There are 2 unused bytes between header and real data */
 #define BGMAC_RX_MAX_FRAME_SIZE                        1536            /* Copied from b44/tg3 */
 #define BGMAC_RX_BUF_SIZE                      (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
+#define BGMAC_RX_ALLOC_SIZE                    (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE) + \
+                                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
 #define BGMAC_BFL_ENETROBO                     0x0010          /* has ephy roboswitch spi */
 #define BGMAC_BFL_ENETADM                      0x0080          /* has ADMtek switch */
 #define ETHER_MAX_LEN   1518
 
 struct bgmac_slot_info {
-       struct sk_buff *skb;
+       union {
+               struct sk_buff *skb;
+               void *buf;
+       };
        dma_addr_t dma_addr;
 };
 
index 02bf0b86995b8e5caac66cffd174208349fce519..2b66ef3d8217cfe7ee1e4b142dc1cc8e2ad3835c 100644 (file)
@@ -1,7 +1,7 @@
-/* bnx2.c: QLogic NX2 network driver.
+/* bnx2.c: QLogic bnx2 network driver.
  *
  * Copyright (c) 2004-2014 Broadcom Corporation
- * Copyright (c) 2014 QLogic Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -58,8 +58,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME                "bnx2"
-#define DRV_MODULE_VERSION     "2.2.5"
-#define DRV_MODULE_RELDATE     "December 20, 2013"
+#define DRV_MODULE_VERSION     "2.2.6"
+#define DRV_MODULE_RELDATE     "January 29, 2014"
 #define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-6.2.3.fw"
 #define FW_RV2P_FILE_06                "bnx2/bnx2-rv2p-06-6.0.15.fw"
 #define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-6.2.1b.fw"
 #define TX_TIMEOUT  (5*HZ)
 
 static char version[] =
-       "QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+       "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
-MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver");
+MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 MODULE_FIRMWARE(FW_MIPS_FILE_06);
@@ -4984,8 +4984,6 @@ bnx2_init_chip(struct bnx2 *bp)
 
        bp->idle_chk_status_idx = 0xffff;
 
-       bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
-
        /* Set up how to generate a link change interrupt. */
        BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
 
@@ -7710,17 +7708,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
        return 0;
 }
 
-static netdev_features_t
-bnx2_fix_features(struct net_device *dev, netdev_features_t features)
-{
-       struct bnx2 *bp = netdev_priv(dev);
-
-       if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
-               features |= NETIF_F_HW_VLAN_CTAG_RX;
-
-       return features;
-}
-
 static int
 bnx2_set_features(struct net_device *dev, netdev_features_t features)
 {
@@ -8527,7 +8514,6 @@ static const struct net_device_ops bnx2_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = bnx2_change_mac_addr,
        .ndo_change_mtu         = bnx2_change_mtu,
-       .ndo_fix_features       = bnx2_fix_features,
        .ndo_set_features       = bnx2_set_features,
        .ndo_tx_timeout         = bnx2_tx_timeout,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -8578,6 +8564,9 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->features |= dev->hw_features;
        dev->priv_flags |= IFF_UNICAST_FLT;
 
+       if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
+               dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+
        if ((rc = register_netdev(dev))) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto error;
index 28df35d35893360af2593de3a57b3fe13b7a9f6d..f92f76c447569db422730f9c1f9a7b36d9d17b0f 100644 (file)
@@ -1,7 +1,7 @@
-/* bnx2.h: QLogic NX2 network driver.
+/* bnx2.h: QLogic bnx2 network driver.
  *
  * Copyright (c) 2004-2014 Broadcom Corporation
- * Copyright (c) 2014 QLogic Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 7db79c28b5ff53a663f519aed22807de81532794..b0f2ccadaffd489a16034680b39b5395321ed537 100644 (file)
@@ -1,7 +1,7 @@
-/* bnx2_fw.h: QLogic NX2 network driver.
+/* bnx2_fw.h: QLogic bnx2 network driver.
  *
  * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation
- * Copyright (c) 2014 QLogic Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index ffe4e003e636db95054df9e1b76b5198a2b5e2b1..e3d853cab7c9644c241cd42ba1a2844b82e55176 100644 (file)
@@ -2446,7 +2446,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        }
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
-       memset(packet + ETH_ALEN, 0, ETH_ALEN);
+       eth_zero_addr(packet + ETH_ALEN);
        memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
        for (i = ETH_HLEN; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);
index 583591d52497d22fb6684cadb161de167b74260e..058bc73282201e8b9f897273c18ac18856eac38e 100644 (file)
@@ -521,6 +521,17 @@ struct port_hw_cfg {                   /* port 0: 0x12c  port 1: 0x2bc */
         */
        #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK                     0x000F0000
        #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT                    16
+       /*  Set non-default values for TXFIR in SFP mode. */
+       #define PORT_HW_CFG_TX_DRV_IFIR_MASK                          0x00F00000
+       #define PORT_HW_CFG_TX_DRV_IFIR_SHIFT                         20
+
+       /*  Set non-default values for IPREDRIVER in SFP mode. */
+       #define PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK                    0x0F000000
+       #define PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT                   24
+
+       /*  Set non-default values for POST2 in SFP mode. */
+       #define PORT_HW_CFG_TX_DRV_POST2_MASK                         0xF0000000
+       #define PORT_HW_CFG_TX_DRV_POST2_SHIFT                        28
 
        u32 reserved0[5];                                   /* 0x17c */
 
@@ -2247,8 +2258,8 @@ struct shmem2_region {
        #define LINK_SFP_EEPROM_COMP_CODE_LRM   0x00004000
 
        u32 reserved5[2];
-       u32 reserved6[PORT_MAX];
-
+       u32 link_change_count[PORT_MAX];        /* Offset 0x160-0x164 */
+       #define LINK_CHANGE_COUNT_MASK 0xff     /* Offset 0x168 */
        /* driver version for each personality */
        struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
 
index bd90e50bd8e662d4731b61f82ba9fe06071429a1..d6e1975b7b691ab51ab536a7c9ec413601f24b6c 100644 (file)
@@ -278,7 +278,7 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
 }
 
 
-/* congestion managment port init api description
+/* congestion management port init api description
  * the api works as follows:
  * the driver should pass the cmng_init_input struct, the port_init function
  * will prepare the required internal ram structure which will be passed back
index 778e4cd325714eb34e91a369726ea1818a0c8ebe..21a0d6afca4a53a24100289585f1e9b6d59ee497 100644 (file)
@@ -195,6 +195,10 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
 
 #define MAX_PACKET_SIZE                                        (9700)
 #define MAX_KR_LINK_RETRY                              4
+#define DEFAULT_TX_DRV_BRDCT           2
+#define DEFAULT_TX_DRV_IFIR            0
+#define DEFAULT_TX_DRV_POST2           3
+#define DEFAULT_TX_DRV_IPRE_DRIVER     6
 
 /**********************************************************/
 /*                     INTERFACE                          */
@@ -563,7 +567,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
 *      Will return the NIG ETS registers to init values.Except
 *      credit_upper_bound.
 *      That isn't used in this configuration (No WFQ is enabled) and will be
-*      configured acording to spec
+*      configured according to spec
 *.
 ******************************************************************************/
 static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
@@ -680,7 +684,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
 *      Will return the PBF ETS registers to init values.Except
 *      credit_upper_bound.
 *      That isn't used in this configuration (No WFQ is enabled) and will be
-*      configured acording to spec
+*      configured according to spec
 *.
 ******************************************************************************/
 static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
@@ -738,7 +742,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
 }
 /******************************************************************************
 * Description:
-*      E3B0 disable will return basicly the values to init values.
+*      E3B0 disable will return basically the values to init values.
 *.
 ******************************************************************************/
 static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
@@ -761,7 +765,7 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
 
 /******************************************************************************
 * Description:
-*      Disable will return basicly the values to init values.
+*      Disable will return basically the values to init values.
 *
 ******************************************************************************/
 int bnx2x_ets_disabled(struct link_params *params,
@@ -2938,7 +2942,7 @@ static int bnx2x_eee_initial_config(struct link_params *params,
 {
        vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
 
-       /* Propogate params' bits --> vars (for migration exposure) */
+       /* Propagate params' bits --> vars (for migration exposure) */
        if (params->eee_mode & EEE_MODE_ENABLE_LPI)
                vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
        else
@@ -3595,10 +3599,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
  * init configuration, and set/clear SGMII flag. Internal
  * phy init is done purely in phy_init stage.
  */
-#define WC_TX_DRIVER(post2, idriver, ipre) \
+#define WC_TX_DRIVER(post2, idriver, ipre, ifir) \
        ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
         (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
-        (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
+        (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET) | \
+        (ifir << MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET))
 
 #define WC_TX_FIR(post, main, pre) \
        ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
@@ -3765,12 +3770,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
        lane = bnx2x_get_warpcore_lane(phy, params);
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
-                        WC_TX_DRIVER(0x02, 0x06, 0x09));
+                        WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
        /* Configure the next lane if dual mode */
        if (phy->flags & FLAGS_WC_DUAL_MODE)
                bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                                 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
-                                WC_TX_DRIVER(0x02, 0x06, 0x09));
+                                WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
                         0x03f0);
@@ -3933,6 +3938,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u16 misc1_val, tap_val, tx_driver_val, lane, val;
        u32 cfg_tap_val, tx_drv_brdct, tx_equal;
+       u32 ifir_val, ipost2_val, ipre_driver_val;
 
        /* Hold rxSeqStart */
        bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
@@ -3978,7 +3984,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
        if (is_xfi) {
                misc1_val |= 0x5;
                tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
-               tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
+               tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03, 0);
        } else {
                cfg_tap_val = REG_RD(bp, params->shmem_base +
                                     offsetof(struct shmem_region, dev_info.
@@ -3987,10 +3993,6 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
 
                tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
 
-               tx_drv_brdct = (cfg_tap_val &
-                               PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
-                              PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
-
                misc1_val |= 0x9;
 
                /* TAP values are controlled by nvram, if value there isn't 0 */
@@ -3999,11 +4001,36 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
                else
                        tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
 
-               if (tx_drv_brdct)
-                       tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct,
-                                                    0x06);
-               else
-                       tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
+               ifir_val = DEFAULT_TX_DRV_IFIR;
+               ipost2_val = DEFAULT_TX_DRV_POST2;
+               ipre_driver_val = DEFAULT_TX_DRV_IPRE_DRIVER;
+               tx_drv_brdct = DEFAULT_TX_DRV_BRDCT;
+
+               /* If any of the IFIR/IPRE_DRIVER/POST@ is set, apply all
+                * configuration.
+                */
+               if (cfg_tap_val & (PORT_HW_CFG_TX_DRV_IFIR_MASK |
+                                  PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK |
+                                  PORT_HW_CFG_TX_DRV_POST2_MASK)) {
+                       ifir_val = (cfg_tap_val &
+                                   PORT_HW_CFG_TX_DRV_IFIR_MASK) >>
+                               PORT_HW_CFG_TX_DRV_IFIR_SHIFT;
+                       ipre_driver_val = (cfg_tap_val &
+                                          PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK)
+                       >> PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT;
+                       ipost2_val = (cfg_tap_val &
+                                     PORT_HW_CFG_TX_DRV_POST2_MASK) >>
+                               PORT_HW_CFG_TX_DRV_POST2_SHIFT;
+               }
+
+               if (cfg_tap_val & PORT_HW_CFG_TX_DRV_BROADCAST_MASK) {
+                       tx_drv_brdct = (cfg_tap_val &
+                                       PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+                               PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+               }
+
+               tx_driver_val = WC_TX_DRIVER(ipost2_val, tx_drv_brdct,
+                                            ipre_driver_val, ifir_val);
        }
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4144,7 +4171,7 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
                          MDIO_WC_REG_TX_FIR_TAP_ENABLE));
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
-                        WC_TX_DRIVER(0x02, 0x02, 0x02));
+                        WC_TX_DRIVER(0x02, 0x02, 0x02, 0));
 }
 
 static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
@@ -6731,6 +6758,25 @@ static int bnx2x_update_link_up(struct link_params *params,
        msleep(20);
        return rc;
 }
+
+static void bnx2x_chng_link_count(struct link_params *params, bool clear)
+{
+       struct bnx2x *bp = params->bp;
+       u32 addr, val;
+
+       /* Verify the link_change_count is supported by the MFW */
+       if (!(SHMEM2_HAS(bp, link_change_count)))
+               return;
+
+       addr = params->shmem2_base +
+               offsetof(struct shmem2_region, link_change_count[params->port]);
+       if (clear)
+               val = 0;
+       else
+               val = REG_RD(bp, addr) + 1;
+       REG_WR(bp, addr, val);
+}
+
 /* The bnx2x_link_update function should be called upon link
  * interrupt.
  * Link is considered up as follows:
@@ -6749,6 +6795,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
        struct link_vars phy_vars[MAX_PHYS];
        u8 port = params->port;
        u8 link_10g_plus, phy_index;
+       u32 prev_link_status = vars->link_status;
        u8 ext_phy_link_up = 0, cur_link_up;
        int rc = 0;
        u8 is_mi_int = 0;
@@ -6988,6 +7035,9 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
        else
                rc = bnx2x_update_link_down(params, vars);
 
+       if ((prev_link_status ^ vars->link_status) & LINK_STATUS_LINK_UP)
+               bnx2x_chng_link_count(params, false);
+
        /* Update MCP link status was changed */
        if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX)
                bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
@@ -12631,6 +12681,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
        params->link_flags = PHY_INITIALIZED;
        /* Driver opens NIG-BRB filters */
        bnx2x_set_rx_filter(params, 1);
+       bnx2x_chng_link_count(params, true);
        /* Check if link flap can be avoided */
        lfa_status = bnx2x_check_lfa(params);
 
@@ -12705,6 +12756,7 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
        DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
        /* Disable attentions */
        vars->link_status = 0;
+       bnx2x_chng_link_count(params, true);
        bnx2x_update_mng(params, vars->link_status);
        vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
                              SHMEM_EEE_ACTIVE_BIT);
@@ -13308,7 +13360,7 @@ static void bnx2x_check_over_curr(struct link_params *params,
                vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
 }
 
-/* Returns 0 if no change occured since last check; 1 otherwise. */
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
 static u8 bnx2x_analyze_link_error(struct link_params *params,
                                    struct link_vars *vars, u32 status,
                                    u32 phy_flag, u32 link_flag, u8 notify)
index 1ec635f549944f87e0c0df4eb0816dcf33d90706..b9f85fccb419be528ae328efc3af4303f0498103 100644 (file)
@@ -11556,13 +11556,13 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
        /* Disable iSCSI OOO if MAC configuration is invalid. */
        if (!is_valid_ether_addr(iscsi_mac)) {
                bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-               memset(iscsi_mac, 0, ETH_ALEN);
+               eth_zero_addr(iscsi_mac);
        }
 
        /* Disable FCoE if MAC configuration is invalid. */
        if (!is_valid_ether_addr(fip_mac)) {
                bp->flags |= NO_FCOE_FLAG;
-               memset(bp->fip_mac, 0, ETH_ALEN);
+               eth_zero_addr(bp->fip_mac);
        }
 }
 
@@ -11573,7 +11573,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
        int port = BP_PORT(bp);
 
        /* Zero primary MAC configuration */
-       memset(bp->dev->dev_addr, 0, ETH_ALEN);
+       eth_zero_addr(bp->dev->dev_addr);
 
        if (BP_NOMCP(bp)) {
                BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -11620,7 +11620,7 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
        u32 cfg;
 
        if (IS_VF(bp))
-               return 0;
+               return false;
 
        if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
                /* Take function: tmp = func */
@@ -11660,6 +11660,13 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
        u32 val = 0, val2 = 0;
        int rc = 0;
 
+       /* Validate that chip access is feasible */
+       if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
+               dev_err(&bp->pdev->dev,
+                       "Chip read returns all Fs. Preventing probe from continuing\n");
+               return -EINVAL;
+       }
+
        bnx2x_get_common_hwinfo(bp);
 
        /*
@@ -12566,6 +12573,7 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
                                              struct net_device *dev,
                                              netdev_features_t features)
 {
+       features = vlan_features_check(skb, features);
        return vxlan_features_check(skb, features);
 }
 
@@ -13287,30 +13295,27 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return 0;
 }
 
-static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
        u64 ns;
-       u32 remainder;
 
        ns = timecounter_read(&bp->timecounter);
 
        DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
 
        return 0;
 }
 
 static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
-                            const struct timespec *ts)
+                            const struct timespec64 *ts)
 {
        struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
        u64 ns;
 
-       ns = ts->tv_sec * 1000000000ULL;
-       ns += ts->tv_nsec;
+       ns = timespec64_to_ns(ts);
 
        DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
 
@@ -13342,8 +13347,8 @@ static void bnx2x_register_phc(struct bnx2x *bp)
        bp->ptp_clock_info.pps = 0;
        bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
        bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
-       bp->ptp_clock_info.gettime = bnx2x_ptp_gettime;
-       bp->ptp_clock_info.settime = bnx2x_ptp_settime;
+       bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
+       bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
        bp->ptp_clock_info.enable = bnx2x_ptp_enable;
 
        bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
index 6fe547c93e74b0e36f8a920737f25c79c2470143..49d511092c82fc514832fc4aa30078a08a6f19ad 100644 (file)
@@ -29,7 +29,7 @@
 #define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND                (0x1<<1)
 /* [RW 1] Initiate the ATC array - reset all the valid bits */
 #define ATC_REG_ATC_INIT_ARRAY                                  0x1100b8
-/* [R 1] ATC initalization done */
+/* [R 1] ATC initialization done */
 #define ATC_REG_ATC_INIT_DONE                                   0x1100bc
 /* [RC 6] Interrupt register #0 read clear */
 #define ATC_REG_ATC_INT_STS_CLR                                         0x1101c0
@@ -7341,6 +7341,8 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_TX2_ANA_CTRL0                      0x8081
 #define MDIO_WC_REG_TX3_ANA_CTRL0                      0x8091
 #define MDIO_WC_REG_TX0_TX_DRIVER                      0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET                  0x01
+#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_MASK                            0x000e
 #define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET           0x04
 #define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK                     0x00f0
 #define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET               0x08
index cfe3c7695455e16eab516e0f99db6ee6b7e58879..d95f7b4e19e16c2e26bf23392a0d2f5275d5c474 100644 (file)
@@ -2695,7 +2695,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
                        memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
                else
                        /* function has not been loaded yet. Show mac as 0s */
-                       memset(&ivi->mac, 0, ETH_ALEN);
+                       eth_zero_addr(ivi->mac);
 
                /* vlan */
                if (bulletin->valid_bitmap & (1 << VLAN_VALID))
index 800ab44a07cecd721c8b12f0c5b8602e1124a0dd..266b055c2360af759c7f78395636d541210e5b9d 100644 (file)
@@ -1583,7 +1583,7 @@ void bnx2x_memset_stats(struct bnx2x *bp)
        if (bp->port.pmf && bp->port.port_stx)
                bnx2x_port_stats_base_init(bp);
 
-       /* mark the end of statistics initializiation */
+       /* mark the end of statistics initialization */
        bp->stats_init = false;
 }
 
index be40eabc5304dad9e1ded4451b65b4dae6a03ae8..15b2d164756058c6c5fb154bdc128f52aa3148c3 100644 (file)
@@ -800,7 +800,7 @@ int bnx2x_vfpf_config_rss(struct bnx2x *bp,
        req->rss_key_size = T_ETH_RSS_KEY;
        req->rss_result_mask = params->rss_result_mask;
 
-       /* flags handled individually for backward/forward compatability */
+       /* flags handled individually for backward/forward compatibility */
        if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
                req->rss_flags |= VFPF_RSS_MODE_DISABLED;
        if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
@@ -1869,7 +1869,7 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
        rss.rss_obj = &vf->rss_conf_obj;
        rss.rss_result_mask = rss_tlv->rss_result_mask;
 
-       /* flags handled individually for backward/forward compatability */
+       /* flags handled individually for backward/forward compatibility */
        rss.rss_flags = 0;
        rss.ramrod_flags = 0;
 
index f05fab65d78ac62b3b7905102d39893087b1450b..17c145fdf3ff6f40e2ef25098e1c0b423dcf9c5f 100644 (file)
@@ -1,7 +1,7 @@
 /* cnic.c: QLogic CNIC core network driver.
  *
  * Copyright (c) 2006-2014 Broadcom Corporation
- * Copyright (c) 2014 QLogic Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #define CNIC_MODULE_NAME       "cnic"
 
 static char version[] =
-       "QLogic NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+       "QLogic " CNIC_MODULE_NAME "Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
              "Chen (zongxi@broadcom.com");
-MODULE_DESCRIPTION("QLogic NetXtreme II CNIC Driver");
+MODULE_DESCRIPTION("QLogic cnic Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CNIC_MODULE_VERSION);
 
index 8bb36c1c4d68c472a366744224a6781880654396..ef6125b0ee3ed35fbd96dc0a2c5647fa0894e3b0 100644 (file)
@@ -1,7 +1,7 @@
-/* cnic_if.h: QLogic CNIC core network driver.
+/* cnic_if.h: QLogic cnic core network driver.
  *
  * Copyright (c) 2006-2014 Broadcom Corporation
- * Copyright (c) 2014 QLogic Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -15,8 +15,8 @@
 
 #include "bnx2x/bnx2x_mfw_req.h"
 
-#define CNIC_MODULE_VERSION    "2.5.20"
-#define CNIC_MODULE_RELDATE    "March 14, 2014"
+#define CNIC_MODULE_VERSION    "2.5.21"
+#define CNIC_MODULE_RELDATE    "January 29, 2015"
 
 #define CNIC_ULP_RDMA          0
 #define CNIC_ULP_ISCSI         1
index 6befde61c203461a27ac0298d619f4f78b7c366e..6043734ea613bdae8d1a8c0abe7f14719e3a8cbd 100644 (file)
 /* Default highest priority queue for multi queue support */
 #define GENET_Q0_PRIORITY      0
 
-#define GENET_DEFAULT_BD_CNT   \
-       (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)
+#define GENET_Q16_RX_BD_CNT    \
+       (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
+#define GENET_Q16_TX_BD_CNT    \
+       (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
 
 #define RX_BUF_LENGTH          2048
 #define SKB_ALIGNMENT          32
@@ -195,6 +197,14 @@ enum dma_reg {
        DMA_PRIORITY_0,
        DMA_PRIORITY_1,
        DMA_PRIORITY_2,
+       DMA_INDEX2RING_0,
+       DMA_INDEX2RING_1,
+       DMA_INDEX2RING_2,
+       DMA_INDEX2RING_3,
+       DMA_INDEX2RING_4,
+       DMA_INDEX2RING_5,
+       DMA_INDEX2RING_6,
+       DMA_INDEX2RING_7,
 };
 
 static const u8 bcmgenet_dma_regs_v3plus[] = {
@@ -206,6 +216,14 @@ static const u8 bcmgenet_dma_regs_v3plus[] = {
        [DMA_PRIORITY_0]        = 0x30,
        [DMA_PRIORITY_1]        = 0x34,
        [DMA_PRIORITY_2]        = 0x38,
+       [DMA_INDEX2RING_0]      = 0x70,
+       [DMA_INDEX2RING_1]      = 0x74,
+       [DMA_INDEX2RING_2]      = 0x78,
+       [DMA_INDEX2RING_3]      = 0x7C,
+       [DMA_INDEX2RING_4]      = 0x80,
+       [DMA_INDEX2RING_5]      = 0x84,
+       [DMA_INDEX2RING_6]      = 0x88,
+       [DMA_INDEX2RING_7]      = 0x8C,
 };
 
 static const u8 bcmgenet_dma_regs_v2[] = {
@@ -829,9 +847,10 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
 };
 
 /* Power down the unimac, based on mode. */
-static void bcmgenet_power_down(struct bcmgenet_priv *priv,
+static int bcmgenet_power_down(struct bcmgenet_priv *priv,
                                enum bcmgenet_power_mode mode)
 {
+       int ret = 0;
        u32 reg;
 
        switch (mode) {
@@ -840,7 +859,7 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv,
                break;
 
        case GENET_POWER_WOL_MAGIC:
-               bcmgenet_wol_power_down_cfg(priv, mode);
+               ret = bcmgenet_wol_power_down_cfg(priv, mode);
                break;
 
        case GENET_POWER_PASSIVE:
@@ -850,11 +869,15 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv,
                        reg |= (EXT_PWR_DOWN_PHY |
                                EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
                        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+
+                       bcmgenet_phy_power_set(priv->dev, false);
                }
                break;
        default:
                break;
        }
+
+       return 0;
 }
 
 static void bcmgenet_power_up(struct bcmgenet_priv *priv,
@@ -923,7 +946,7 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
 
        tx_cb_ptr = ring->cbs;
        tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
-       tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
+
        /* Advancing local write pointer */
        if (ring->write_ptr == ring->end_ptr)
                ring->write_ptr = ring->cb_ptr;
@@ -941,36 +964,54 @@ static void bcmgenet_free_cb(struct enet_cb *cb)
        dma_unmap_addr_set(cb, dma_addr, 0);
 }
 
-static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
-                                                 struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
 {
-       bcmgenet_intrl2_0_writel(priv,
-                                UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+       bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
                                 INTRL2_CPU_MASK_SET);
 }
 
-static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
-                                                struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
 {
-       bcmgenet_intrl2_0_writel(priv,
-                                UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+       bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
                                 INTRL2_CPU_MASK_CLEAR);
 }
 
-static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
-                                              struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
+{
+       bcmgenet_intrl2_1_writel(ring->priv,
+                                1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
+                                INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
 {
-       bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+       bcmgenet_intrl2_1_writel(ring->priv,
+                                1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
                                 INTRL2_CPU_MASK_CLEAR);
-       priv->int1_mask &= ~(1 << ring->index);
 }
 
-static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
-                                               struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
 {
-       bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+       bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
+                                INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
+{
+       bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
+                                INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
+{
+       bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
+                                INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
+{
+       bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
                                 INTRL2_CPU_MASK_SET);
-       priv->int1_mask |= (1 << ring->index);
 }
 
 /* Unlocked version of the reclaim routine */
@@ -978,39 +1019,32 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
                                          struct bcmgenet_tx_ring *ring)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
-       int last_tx_cn, last_c_index, num_tx_bds;
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
        unsigned int pkts_compl = 0;
-       unsigned int bds_compl;
        unsigned int c_index;
+       unsigned int txbds_ready;
+       unsigned int txbds_processed = 0;
 
        /* Compute how many buffers are transmitted since last xmit call */
        c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
-       txq = netdev_get_tx_queue(dev, ring->queue);
-
-       last_c_index = ring->c_index;
-       num_tx_bds = ring->size;
+       c_index &= DMA_C_INDEX_MASK;
 
-       c_index &= (num_tx_bds - 1);
-
-       if (c_index >= last_c_index)
-               last_tx_cn = c_index - last_c_index;
+       if (likely(c_index >= ring->c_index))
+               txbds_ready = c_index - ring->c_index;
        else
-               last_tx_cn = num_tx_bds - last_c_index + c_index;
+               txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
 
        netif_dbg(priv, tx_done, dev,
-                 "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
-                 __func__, ring->index,
-                 c_index, last_tx_cn, last_c_index);
+                 "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+                 __func__, ring->index, ring->c_index, c_index, txbds_ready);
 
        /* Reclaim transmitted buffers */
-       while (last_tx_cn-- > 0) {
-               tx_cb_ptr = ring->cbs + last_c_index;
-               bds_compl = 0;
+       while (txbds_processed < txbds_ready) {
+               tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
                if (tx_cb_ptr->skb) {
                        pkts_compl++;
-                       bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
+                       dev->stats.tx_packets++;
                        dev->stats.tx_bytes += tx_cb_ptr->skb->len;
                        dma_unmap_single(&dev->dev,
                                         dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -1026,20 +1060,23 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
                                       DMA_TO_DEVICE);
                        dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
                }
-               dev->stats.tx_packets++;
-               ring->free_bds += bds_compl;
 
-               last_c_index++;
-               last_c_index &= (num_tx_bds - 1);
+               txbds_processed++;
+               if (likely(ring->clean_ptr < ring->end_ptr))
+                       ring->clean_ptr++;
+               else
+                       ring->clean_ptr = ring->cb_ptr;
        }
 
+       ring->free_bds += txbds_processed;
+       ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+
        if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+               txq = netdev_get_tx_queue(dev, ring->queue);
                if (netif_tx_queue_stopped(txq))
                        netif_tx_wake_queue(txq);
        }
 
-       ring->c_index = c_index;
-
        return pkts_compl;
 }
 
@@ -1066,7 +1103,7 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
 
        if (work_done == 0) {
                napi_complete(napi);
-               ring->int_enable(ring->priv, ring);
+               ring->int_enable(ring);
 
                return 0;
        }
@@ -1132,11 +1169,6 @@ static int bcmgenet_xmit_single(struct net_device *dev,
 
        dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
 
-       /* Decrement total BD count and advance our write pointer */
-       ring->free_bds -= 1;
-       ring->prod_index += 1;
-       ring->prod_index &= DMA_P_INDEX_MASK;
-
        return 0;
 }
 
@@ -1175,11 +1207,6 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
                    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
                    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
 
-
-       ring->free_bds -= 1;
-       ring->prod_index += 1;
-       ring->prod_index &= DMA_P_INDEX_MASK;
-
        return 0;
 }
 
@@ -1323,119 +1350,128 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 
        skb_tx_timestamp(skb);
 
-       /* we kept a software copy of how much we should advance the TDMA
-        * producer index, now write it down to the hardware
-        */
-       bcmgenet_tdma_ring_writel(priv, ring->index,
-                                 ring->prod_index, TDMA_PROD_INDEX);
+       /* Decrement total BD count and advance our write pointer */
+       ring->free_bds -= nr_frags + 1;
+       ring->prod_index += nr_frags + 1;
+       ring->prod_index &= DMA_P_INDEX_MASK;
 
        if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
                netif_tx_stop_queue(txq);
 
+       if (!skb->xmit_more || netif_xmit_stopped(txq))
+               /* Packets are ready, update producer index */
+               bcmgenet_tdma_ring_writel(priv, ring->index,
+                                         ring->prod_index, TDMA_PROD_INDEX);
 out:
        spin_unlock_irqrestore(&ring->lock, flags);
 
        return ret;
 }
 
-
-static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
+static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
+                                         struct enet_cb *cb)
 {
        struct device *kdev = &priv->pdev->dev;
        struct sk_buff *skb;
+       struct sk_buff *rx_skb;
        dma_addr_t mapping;
-       int ret;
 
+       /* Allocate a new Rx skb */
        skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
-       if (!skb)
-               return -ENOMEM;
+       if (!skb) {
+               priv->mib.alloc_rx_buff_failed++;
+               netif_err(priv, rx_err, priv->dev,
+                         "%s: Rx skb allocation failed\n", __func__);
+               return NULL;
+       }
 
-       /* a caller did not release this control block */
-       WARN_ON(cb->skb != NULL);
-       cb->skb = skb;
-       mapping = dma_map_single(kdev, skb->data,
-                                priv->rx_buf_len, DMA_FROM_DEVICE);
-       ret = dma_mapping_error(kdev, mapping);
-       if (ret) {
+       /* DMA-map the new Rx skb */
+       mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
+                                DMA_FROM_DEVICE);
+       if (dma_mapping_error(kdev, mapping)) {
                priv->mib.rx_dma_failed++;
-               bcmgenet_free_cb(cb);
+               dev_kfree_skb_any(skb);
                netif_err(priv, rx_err, priv->dev,
-                         "%s DMA map failed\n", __func__);
-               return ret;
+                         "%s: Rx skb DMA mapping failed\n", __func__);
+               return NULL;
        }
 
-       dma_unmap_addr_set(cb, dma_addr, mapping);
-       /* assign packet, prepare descriptor, and advance pointer */
-
-       dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
-
-       /* turn on the newly assigned BD for DMA to use */
-       priv->rx_bd_assign_index++;
-       priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+       /* Grab the current Rx skb from the ring and DMA-unmap it */
+       rx_skb = cb->skb;
+       if (likely(rx_skb))
+               dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+                                priv->rx_buf_len, DMA_FROM_DEVICE);
 
-       priv->rx_bd_assign_ptr = priv->rx_bds +
-               (priv->rx_bd_assign_index * DMA_DESC_SIZE);
+       /* Put the new Rx skb on the ring */
+       cb->skb = skb;
+       dma_unmap_addr_set(cb, dma_addr, mapping);
+       dmadesc_set_addr(priv, cb->bd_addr, mapping);
 
-       return 0;
+       /* Return the current Rx skb to caller */
+       return rx_skb;
 }
 
 /* bcmgenet_desc_rx - descriptor based rx process.
  * this could be called from bottom half, or from NAPI polling method.
  */
-static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
+static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
                                     unsigned int budget)
 {
+       struct bcmgenet_priv *priv = ring->priv;
        struct net_device *dev = priv->dev;
        struct enet_cb *cb;
        struct sk_buff *skb;
        u32 dma_length_status;
        unsigned long dma_flag;
-       int len, err;
+       int len;
        unsigned int rxpktprocessed = 0, rxpkttoprocess;
        unsigned int p_index;
+       unsigned int discards;
        unsigned int chksum_ok = 0;
 
-       p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
+       p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
+
+       discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
+                  DMA_P_INDEX_DISCARD_CNT_MASK;
+       if (discards > ring->old_discards) {
+               discards = discards - ring->old_discards;
+               dev->stats.rx_missed_errors += discards;
+               dev->stats.rx_errors += discards;
+               ring->old_discards += discards;
+
+               /* Clear HW register when we reach 75% of maximum 0xFFFF */
+               if (ring->old_discards >= 0xC000) {
+                       ring->old_discards = 0;
+                       bcmgenet_rdma_ring_writel(priv, ring->index, 0,
+                                                 RDMA_PROD_INDEX);
+               }
+       }
+
        p_index &= DMA_P_INDEX_MASK;
 
-       if (p_index < priv->rx_c_index)
-               rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
-                       priv->rx_c_index + p_index;
+       if (likely(p_index >= ring->c_index))
+               rxpkttoprocess = p_index - ring->c_index;
        else
-               rxpkttoprocess = p_index - priv->rx_c_index;
+               rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
+                                p_index;
 
        netif_dbg(priv, rx_status, dev,
                  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
 
        while ((rxpktprocessed < rxpkttoprocess) &&
               (rxpktprocessed < budget)) {
-               cb = &priv->rx_cbs[priv->rx_read_ptr];
-               skb = cb->skb;
+               cb = &priv->rx_cbs[ring->read_ptr];
+               skb = bcmgenet_rx_refill(priv, cb);
 
-               /* We do not have a backing SKB, so we do not have a
-                * corresponding DMA mapping for this incoming packet since
-                * bcmgenet_rx_refill always either has both skb and mapping or
-                * none.
-                */
                if (unlikely(!skb)) {
                        dev->stats.rx_dropped++;
                        dev->stats.rx_errors++;
-                       goto refill;
+                       goto next;
                }
 
-               /* Unmap the packet contents such that we can use the
-                * RSV from the 64 bytes descriptor when enabled and save
-                * a 32-bits register read
-                */
-               dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
-                                priv->rx_buf_len, DMA_FROM_DEVICE);
-
                if (!priv->desc_64b_en) {
                        dma_length_status =
-                               dmadesc_get_length_status(priv,
-                                                         priv->rx_bds +
-                                                         (priv->rx_read_ptr *
-                                                          DMA_DESC_SIZE));
+                               dmadesc_get_length_status(priv, cb->bd_addr);
                } else {
                        struct status_64 *status;
 
@@ -1451,18 +1487,18 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 
                netif_dbg(priv, rx_status, dev,
                          "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
-                         __func__, p_index, priv->rx_c_index,
-                         priv->rx_read_ptr, dma_length_status);
+                         __func__, p_index, ring->c_index,
+                         ring->read_ptr, dma_length_status);
 
                if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
                        netif_err(priv, rx_status, dev,
                                  "dropping fragmented packet!\n");
                        dev->stats.rx_dropped++;
                        dev->stats.rx_errors++;
-                       dev_kfree_skb_any(cb->skb);
-                       cb->skb = NULL;
-                       goto refill;
+                       dev_kfree_skb_any(skb);
+                       goto next;
                }
+
                /* report errors */
                if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
                                                DMA_RX_OV |
@@ -1481,11 +1517,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
                                dev->stats.rx_length_errors++;
                        dev->stats.rx_dropped++;
                        dev->stats.rx_errors++;
-
-                       /* discard the packet and advance consumer index.*/
-                       dev_kfree_skb_any(cb->skb);
-                       cb->skb = NULL;
-                       goto refill;
+                       dev_kfree_skb_any(skb);
+                       goto next;
                } /* error packet */
 
                chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
@@ -1517,47 +1550,61 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
                        dev->stats.multicast++;
 
                /* Notify kernel */
-               napi_gro_receive(&priv->napi, skb);
-               cb->skb = NULL;
+               napi_gro_receive(&ring->napi, skb);
                netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
 
-               /* refill RX path on the current control block */
-refill:
-               err = bcmgenet_rx_refill(priv, cb);
-               if (err) {
-                       priv->mib.alloc_rx_buff_failed++;
-                       netif_err(priv, rx_err, dev, "Rx refill failed\n");
-               }
-
+next:
                rxpktprocessed++;
-               priv->rx_read_ptr++;
-               priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+               if (likely(ring->read_ptr < ring->end_ptr))
+                       ring->read_ptr++;
+               else
+                       ring->read_ptr = ring->cb_ptr;
+
+               ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
+               bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
        }
 
        return rxpktprocessed;
 }
 
+/* Rx NAPI polling method */
+static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct bcmgenet_rx_ring *ring = container_of(napi,
+                       struct bcmgenet_rx_ring, napi);
+       unsigned int work_done;
+
+       work_done = bcmgenet_desc_rx(ring, budget);
+
+       if (work_done < budget) {
+               napi_complete(napi);
+               ring->int_enable(ring);
+       }
+
+       return work_done;
+}
+
 /* Assign skb to RX DMA descriptor. */
-static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
+static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
+                                    struct bcmgenet_rx_ring *ring)
 {
        struct enet_cb *cb;
-       int ret = 0;
+       struct sk_buff *skb;
        int i;
 
-       netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);
+       netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
 
        /* loop here for each buffer needing assign */
-       for (i = 0; i < priv->num_rx_bds; i++) {
-               cb = &priv->rx_cbs[priv->rx_bd_assign_index];
-               if (cb->skb)
-                       continue;
-
-               ret = bcmgenet_rx_refill(priv, cb);
-               if (ret)
-                       break;
+       for (i = 0; i < ring->size; i++) {
+               cb = ring->cbs + i;
+               skb = bcmgenet_rx_refill(priv, cb);
+               if (skb)
+                       dev_kfree_skb_any(skb);
+               if (!cb->skb)
+                       return -ENOMEM;
        }
 
-       return ret;
+       return 0;
 }
 
 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
@@ -1645,8 +1692,10 @@ static int init_umac(struct bcmgenet_priv *priv)
 {
        struct device *kdev = &priv->pdev->dev;
        int ret;
-       u32 reg, cpu_mask_clear;
-       int index;
+       u32 reg;
+       u32 int0_enable = 0;
+       u32 int1_enable = 0;
+       int i;
 
        dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
 
@@ -1673,16 +1722,21 @@ static int init_umac(struct bcmgenet_priv *priv)
 
        bcmgenet_intr_disable(priv);
 
-       cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
+       /* Enable Rx default queue 16 interrupts */
+       int0_enable |= UMAC_IRQ_RXDMA_DONE;
 
-       dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
+       /* Enable Tx default queue 16 interrupts */
+       int0_enable |= UMAC_IRQ_TXDMA_DONE;
 
        /* Monitor cable plug/unplugged event for internal PHY */
        if (phy_is_internal(priv->phydev)) {
-               cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+               int0_enable |= UMAC_IRQ_LINK_EVENT;
        } else if (priv->ext_phy) {
-               cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+               int0_enable |= UMAC_IRQ_LINK_EVENT;
        } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+                       int0_enable |= UMAC_IRQ_LINK_EVENT;
+
                reg = bcmgenet_bp_mc_get(priv);
                reg |= BIT(priv->hw_params->bp_in_en_shift);
 
@@ -1696,13 +1750,18 @@ static int init_umac(struct bcmgenet_priv *priv)
 
        /* Enable MDIO interrupts on GENET v3+ */
        if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
-               cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
+               int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
 
-       bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+       /* Enable Rx priority queue interrupts */
+       for (i = 0; i < priv->hw_params->rx_queues; ++i)
+               int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
 
-       for (index = 0; index < priv->hw_params->tx_queues; index++)
-               bcmgenet_intrl2_1_writel(priv, (1 << index),
-                                        INTRL2_CPU_MASK_CLEAR);
+       /* Enable Tx priority queue interrupts */
+       for (i = 0; i < priv->hw_params->tx_queues; ++i)
+               int1_enable |= (1 << i);
+
+       bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+       bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 
        /* Enable rx/tx engine.*/
        dev_dbg(kdev, "done init umac\n");
@@ -1710,21 +1769,17 @@ static int init_umac(struct bcmgenet_priv *priv)
        return 0;
 }
 
-/* Initialize all house-keeping variables for a TX ring, along
- * with corresponding hardware registers
- */
+/* Initialize a Tx ring along with corresponding hardware registers */
 static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
                                  unsigned int index, unsigned int size,
-                                 unsigned int write_ptr, unsigned int end_ptr)
+                                 unsigned int start_ptr, unsigned int end_ptr)
 {
        struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
        u32 words_per_bd = WORDS_PER_BD(priv);
        u32 flow_period_val = 0;
-       unsigned int first_bd;
 
        spin_lock_init(&ring->lock);
        ring->priv = priv;
-       netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
        ring->index = index;
        if (index == DESC_INDEX) {
                ring->queue = 0;
@@ -1735,12 +1790,13 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
                ring->int_enable = bcmgenet_tx_ring_int_enable;
                ring->int_disable = bcmgenet_tx_ring_int_disable;
        }
-       ring->cbs = priv->tx_cbs + write_ptr;
+       ring->cbs = priv->tx_cbs + start_ptr;
        ring->size = size;
+       ring->clean_ptr = start_ptr;
        ring->c_index = 0;
        ring->free_bds = size;
-       ring->write_ptr = write_ptr;
-       ring->cb_ptr = write_ptr;
+       ring->write_ptr = start_ptr;
+       ring->cb_ptr = start_ptr;
        ring->end_ptr = end_ptr - 1;
        ring->prod_index = 0;
 
@@ -1754,149 +1810,319 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
        /* Disable rate control for now */
        bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
                                  TDMA_FLOW_PERIOD);
-       /* Unclassified traffic goes to ring 16 */
        bcmgenet_tdma_ring_writel(priv, index,
                                  ((size << DMA_RING_SIZE_SHIFT) |
                                   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
 
-       first_bd = write_ptr;
-
        /* Set start and end address, read and write pointers */
-       bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+       bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
                                  DMA_START_ADDR);
-       bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+       bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
                                  TDMA_READ_PTR);
-       bcmgenet_tdma_ring_writel(priv, index, first_bd,
+       bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
                                  TDMA_WRITE_PTR);
        bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
                                  DMA_END_ADDR);
-
-       napi_enable(&ring->napi);
-}
-
-static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
-                                 unsigned int index)
-{
-       struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
-
-       napi_disable(&ring->napi);
-       netif_napi_del(&ring->napi);
 }
 
 /* Initialize a RDMA ring */
 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
-                                unsigned int index, unsigned int size)
+                                unsigned int index, unsigned int size,
+                                unsigned int start_ptr, unsigned int end_ptr)
 {
+       struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
        u32 words_per_bd = WORDS_PER_BD(priv);
        int ret;
 
-       priv->num_rx_bds = TOTAL_DESC;
-       priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
-       priv->rx_bd_assign_ptr = priv->rx_bds;
-       priv->rx_bd_assign_index = 0;
-       priv->rx_c_index = 0;
-       priv->rx_read_ptr = 0;
-       priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
-                              GFP_KERNEL);
-       if (!priv->rx_cbs)
-               return -ENOMEM;
+       ring->priv = priv;
+       ring->index = index;
+       if (index == DESC_INDEX) {
+               ring->int_enable = bcmgenet_rx_ring16_int_enable;
+               ring->int_disable = bcmgenet_rx_ring16_int_disable;
+       } else {
+               ring->int_enable = bcmgenet_rx_ring_int_enable;
+               ring->int_disable = bcmgenet_rx_ring_int_disable;
+       }
+       ring->cbs = priv->rx_cbs + start_ptr;
+       ring->size = size;
+       ring->c_index = 0;
+       ring->read_ptr = start_ptr;
+       ring->cb_ptr = start_ptr;
+       ring->end_ptr = end_ptr - 1;
 
-       ret = bcmgenet_alloc_rx_buffers(priv);
-       if (ret) {
-               kfree(priv->rx_cbs);
+       ret = bcmgenet_alloc_rx_buffers(priv, ring);
+       if (ret)
                return ret;
-       }
 
-       bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
        bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
        bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
+       bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
        bcmgenet_rdma_ring_writel(priv, index,
                                  ((size << DMA_RING_SIZE_SHIFT) |
                                   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
-       bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
-       bcmgenet_rdma_ring_writel(priv, index,
-                                 words_per_bd * size - 1, DMA_END_ADDR);
        bcmgenet_rdma_ring_writel(priv, index,
                                  (DMA_FC_THRESH_LO <<
                                   DMA_XOFF_THRESHOLD_SHIFT) |
                                   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
-       bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
+
+       /* Set start and end address, read and write pointers */
+       bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+                                 DMA_START_ADDR);
+       bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+                                 RDMA_READ_PTR);
+       bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+                                 RDMA_WRITE_PTR);
+       bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+                                 DMA_END_ADDR);
 
        return ret;
 }
 
-/* init multi xmit queues, only available for GENET2+
- * the queue is partitioned as follows:
+static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
+{
+       unsigned int i;
+       struct bcmgenet_tx_ring *ring;
+
+       for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+               ring = &priv->tx_rings[i];
+               netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+       }
+
+       ring = &priv->tx_rings[DESC_INDEX];
+       netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+}
+
+static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
+{
+       unsigned int i;
+       struct bcmgenet_tx_ring *ring;
+
+       for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+               ring = &priv->tx_rings[i];
+               napi_enable(&ring->napi);
+       }
+
+       ring = &priv->tx_rings[DESC_INDEX];
+       napi_enable(&ring->napi);
+}
+
+static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
+{
+       unsigned int i;
+       struct bcmgenet_tx_ring *ring;
+
+       for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+               ring = &priv->tx_rings[i];
+               napi_disable(&ring->napi);
+       }
+
+       ring = &priv->tx_rings[DESC_INDEX];
+       napi_disable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
+{
+       unsigned int i;
+       struct bcmgenet_tx_ring *ring;
+
+       for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+               ring = &priv->tx_rings[i];
+               netif_napi_del(&ring->napi);
+       }
+
+       ring = &priv->tx_rings[DESC_INDEX];
+       netif_napi_del(&ring->napi);
+}
+
+/* Initialize Tx queues
  *
- * queue 0 - 3 is priority based, each one has 32 descriptors,
+ * Queues 0-3 are priority-based, each one has 32 descriptors,
  * with queue 0 being the highest priority queue.
  *
- * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
- * descriptors: 256 - (number of tx queues * bds per queues) = 128
- * descriptors.
+ * Queue 16 is the default Tx queue with
+ * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
  *
- * The transmit control block pool is then partitioned as following:
- * - tx_cbs[0...127] are for queue 16
- * - tx_ring_cbs[0] points to tx_cbs[128..159]
- * - tx_ring_cbs[1] points to tx_cbs[160..191]
- * - tx_ring_cbs[2] points to tx_cbs[192..223]
- * - tx_ring_cbs[3] points to tx_cbs[224..255]
+ * The transmit control block pool is then partitioned as follows:
+ * - Tx queue 0 uses tx_cbs[0..31]
+ * - Tx queue 1 uses tx_cbs[32..63]
+ * - Tx queue 2 uses tx_cbs[64..95]
+ * - Tx queue 3 uses tx_cbs[96..127]
+ * - Tx queue 16 uses tx_cbs[128..255]
  */
-static void bcmgenet_init_multiq(struct net_device *dev)
+static void bcmgenet_init_tx_queues(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
-       unsigned int i, dma_enable;
-       u32 reg, dma_ctrl, ring_cfg = 0;
+       u32 i, dma_enable;
+       u32 dma_ctrl, ring_cfg;
        u32 dma_priority[3] = {0, 0, 0};
 
-       if (!netif_is_multiqueue(dev)) {
-               netdev_warn(dev, "called with non multi queue aware HW\n");
-               return;
-       }
-
        dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
        dma_enable = dma_ctrl & DMA_EN;
        dma_ctrl &= ~DMA_EN;
        bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
 
+       dma_ctrl = 0;
+       ring_cfg = 0;
+
        /* Enable strict priority arbiter mode */
        bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
 
+       /* Initialize Tx priority queues */
        for (i = 0; i < priv->hw_params->tx_queues; i++) {
-               /* first 64 tx_cbs are reserved for default tx queue
-                * (ring 16)
-                */
-               bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
-                                     i * priv->hw_params->bds_cnt,
-                                     (i + 1) * priv->hw_params->bds_cnt);
-
-               /* Configure ring as descriptor ring and setup priority */
-               ring_cfg |= 1 << i;
-               dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
-
+               bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
+                                     i * priv->hw_params->tx_bds_per_q,
+                                     (i + 1) * priv->hw_params->tx_bds_per_q);
+               ring_cfg |= (1 << i);
+               dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
                dma_priority[DMA_PRIO_REG_INDEX(i)] |=
                        ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
        }
 
-       /* Set ring 16 priority and program the hardware registers */
+       /* Initialize Tx default queue 16 */
+       bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
+                             priv->hw_params->tx_queues *
+                             priv->hw_params->tx_bds_per_q,
+                             TOTAL_DESC);
+       ring_cfg |= (1 << DESC_INDEX);
+       dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
        dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
                ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
                 DMA_PRIO_REG_SHIFT(DESC_INDEX));
+
+       /* Set Tx queue priorities */
        bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
        bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
        bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
 
+       /* Initialize Tx NAPI */
+       bcmgenet_init_tx_napi(priv);
+
+       /* Enable Tx queues */
+       bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
+
+       /* Enable Tx DMA */
+       if (dma_enable)
+               dma_ctrl |= DMA_EN;
+       bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+}
+
+static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
+{
+       unsigned int i;
+       struct bcmgenet_rx_ring *ring;
+
+       for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+               ring = &priv->rx_rings[i];
+               netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
+       }
+
+       ring = &priv->rx_rings[DESC_INDEX];
+       netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
+}
+
+static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
+{
+       unsigned int i;
+       struct bcmgenet_rx_ring *ring;
+
+       for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+               ring = &priv->rx_rings[i];
+               napi_enable(&ring->napi);
+       }
+
+       ring = &priv->rx_rings[DESC_INDEX];
+       napi_enable(&ring->napi);
+}
+
+static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
+{
+       unsigned int i;
+       struct bcmgenet_rx_ring *ring;
+
+       for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+               ring = &priv->rx_rings[i];
+               napi_disable(&ring->napi);
+       }
+
+       ring = &priv->rx_rings[DESC_INDEX];
+       napi_disable(&ring->napi);
+}
+
+static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
+{
+       unsigned int i;
+       struct bcmgenet_rx_ring *ring;
+
+       for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+               ring = &priv->rx_rings[i];
+               netif_napi_del(&ring->napi);
+       }
+
+       ring = &priv->rx_rings[DESC_INDEX];
+       netif_napi_del(&ring->napi);
+}
+
+/* Initialize Rx queues
+ *
+ * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
+ * used to direct traffic to these queues.
+ *
+ * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
+ */
+static int bcmgenet_init_rx_queues(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       u32 i;
+       u32 dma_enable;
+       u32 dma_ctrl;
+       u32 ring_cfg;
+       int ret;
+
+       dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
+       dma_enable = dma_ctrl & DMA_EN;
+       dma_ctrl &= ~DMA_EN;
+       bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+       dma_ctrl = 0;
+       ring_cfg = 0;
+
+       /* Initialize Rx priority queues */
+       for (i = 0; i < priv->hw_params->rx_queues; i++) {
+               ret = bcmgenet_init_rx_ring(priv, i,
+                                           priv->hw_params->rx_bds_per_q,
+                                           i * priv->hw_params->rx_bds_per_q,
+                                           (i + 1) *
+                                           priv->hw_params->rx_bds_per_q);
+               if (ret)
+                       return ret;
+
+               ring_cfg |= (1 << i);
+               dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+       }
+
+       /* Initialize Rx default queue 16 */
+       ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
+                                   priv->hw_params->rx_queues *
+                                   priv->hw_params->rx_bds_per_q,
+                                   TOTAL_DESC);
+       if (ret)
+               return ret;
+
+       ring_cfg |= (1 << DESC_INDEX);
+       dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
+
+       /* Initialize Rx NAPI */
+       bcmgenet_init_rx_napi(priv);
+
        /* Enable rings */
-       reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
-       reg |= ring_cfg;
-       bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
+       bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
 
        /* Configure ring as descriptor ring and re-enable DMA if enabled */
-       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
-       reg |= dma_ctrl;
        if (dma_enable)
-               reg |= DMA_EN;
-       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+               dma_ctrl |= DMA_EN;
+       bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+       return 0;
 }
 
 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
@@ -1950,10 +2176,13 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
        return ret;
 }
 
-static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
        int i;
 
+       bcmgenet_fini_rx_napi(priv);
+       bcmgenet_fini_tx_napi(priv);
+
        /* disable DMA */
        bcmgenet_dma_teardown(priv);
 
@@ -1969,37 +2198,27 @@ static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
        kfree(priv->tx_cbs);
 }
 
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
-{
-       int i;
-
-       bcmgenet_fini_tx_ring(priv, DESC_INDEX);
-
-       for (i = 0; i < priv->hw_params->tx_queues; i++)
-               bcmgenet_fini_tx_ring(priv, i);
-
-       __bcmgenet_fini_dma(priv);
-}
-
 /* init_edma: Initialize DMA control register */
 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 {
        int ret;
+       unsigned int i;
+       struct enet_cb *cb;
 
-       netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
-
-       /* by default, enable ring 16 (descriptor based) */
-       ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
-       if (ret) {
-               netdev_err(priv->dev, "failed to initialize RX ring\n");
-               return ret;
-       }
+       netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
 
-       /* init rDma */
-       bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+       /* Initialize common Rx ring structures */
+       priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
+       priv->num_rx_bds = TOTAL_DESC;
+       priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
+                              GFP_KERNEL);
+       if (!priv->rx_cbs)
+               return -ENOMEM;
 
-       /* Init tDma */
-       bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+       for (i = 0; i < priv->num_rx_bds; i++) {
+               cb = priv->rx_cbs + i;
+               cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
+       }
 
        /* Initialize common TX ring structures */
        priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
@@ -2007,43 +2226,35 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
        priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
                               GFP_KERNEL);
        if (!priv->tx_cbs) {
-               __bcmgenet_fini_dma(priv);
+               kfree(priv->rx_cbs);
                return -ENOMEM;
        }
 
-       /* initialize multi xmit queue */
-       bcmgenet_init_multiq(priv->dev);
-
-       /* initialize special ring 16 */
-       bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
-                             priv->hw_params->tx_queues *
-                             priv->hw_params->bds_cnt,
-                             TOTAL_DESC);
+       for (i = 0; i < priv->num_tx_bds; i++) {
+               cb = priv->tx_cbs + i;
+               cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
+       }
 
-       return 0;
-}
+       /* Init rDma */
+       bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
 
-/* NAPI polling method*/
-static int bcmgenet_poll(struct napi_struct *napi, int budget)
-{
-       struct bcmgenet_priv *priv = container_of(napi,
-                       struct bcmgenet_priv, napi);
-       unsigned int work_done;
+       /* Initialize Rx queues */
+       ret = bcmgenet_init_rx_queues(priv->dev);
+       if (ret) {
+               netdev_err(priv->dev, "failed to initialize Rx queues\n");
+               bcmgenet_free_rx_buffers(priv);
+               kfree(priv->rx_cbs);
+               kfree(priv->tx_cbs);
+               return ret;
+       }
 
-       work_done = bcmgenet_desc_rx(priv, budget);
+       /* Init tDma */
+       bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
 
-       /* Advancing our consumer index*/
-       priv->rx_c_index += work_done;
-       priv->rx_c_index &= DMA_C_INDEX_MASK;
-       bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
-                                 priv->rx_c_index, RDMA_CONS_INDEX);
-       if (work_done < budget) {
-               napi_complete(napi);
-               bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
-                                        INTRL2_CPU_MASK_CLEAR);
-       }
+       /* Initialize Tx queues */
+       bcmgenet_init_tx_queues(priv->dev);
 
-       return work_done;
+       return 0;
 }
 
 /* Interrupt bottom half */
@@ -2063,87 +2274,100 @@ static void bcmgenet_irq_task(struct work_struct *work)
 
        /* Link UP/DOWN event */
        if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-           (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+           (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
                phy_mac_interrupt(priv->phydev,
-                                 priv->irq0_stat & UMAC_IRQ_LINK_UP);
-               priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
+                                 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
+               priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
        }
 }
 
-/* bcmgenet_isr1: interrupt handler for ring buffer. */
+/* bcmgenet_isr1: handle Rx and Tx priority queues */
 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 {
        struct bcmgenet_priv *priv = dev_id;
-       struct bcmgenet_tx_ring *ring;
+       struct bcmgenet_rx_ring *rx_ring;
+       struct bcmgenet_tx_ring *tx_ring;
        unsigned int index;
 
        /* Save irq status for bottom-half processing. */
        priv->irq1_stat =
                bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
                ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+
        /* clear interrupts */
        bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
        netif_dbg(priv, intr, priv->dev,
                  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
 
-       /* Check the MBDONE interrupts.
-        * packet is done, reclaim descriptors
-        */
+       /* Check Rx priority queue interrupts */
+       for (index = 0; index < priv->hw_params->rx_queues; index++) {
+               if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+                       continue;
+
+               rx_ring = &priv->rx_rings[index];
+
+               if (likely(napi_schedule_prep(&rx_ring->napi))) {
+                       rx_ring->int_disable(rx_ring);
+                       __napi_schedule(&rx_ring->napi);
+               }
+       }
+
+       /* Check Tx priority queue interrupts */
        for (index = 0; index < priv->hw_params->tx_queues; index++) {
                if (!(priv->irq1_stat & BIT(index)))
                        continue;
 
-               ring = &priv->tx_rings[index];
+               tx_ring = &priv->tx_rings[index];
 
-               if (likely(napi_schedule_prep(&ring->napi))) {
-                       ring->int_disable(priv, ring);
-                       __napi_schedule(&ring->napi);
+               if (likely(napi_schedule_prep(&tx_ring->napi))) {
+                       tx_ring->int_disable(tx_ring);
+                       __napi_schedule(&tx_ring->napi);
                }
        }
 
        return IRQ_HANDLED;
 }
 
-/* bcmgenet_isr0: Handle various interrupts. */
+/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 {
        struct bcmgenet_priv *priv = dev_id;
+       struct bcmgenet_rx_ring *rx_ring;
+       struct bcmgenet_tx_ring *tx_ring;
 
        /* Save irq status for bottom-half processing. */
        priv->irq0_stat =
                bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
                ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+
        /* clear interrupts */
        bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
 
        netif_dbg(priv, intr, priv->dev,
                  "IRQ=0x%x\n", priv->irq0_stat);
 
-       if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
-               /* We use NAPI(software interrupt throttling, if
-                * Rx Descriptor throttling is not used.
-                * Disable interrupt, will be enabled in the poll method.
-                */
-               if (likely(napi_schedule_prep(&priv->napi))) {
-                       bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
-                                                INTRL2_CPU_MASK_SET);
-                       __napi_schedule(&priv->napi);
+       if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+               rx_ring = &priv->rx_rings[DESC_INDEX];
+
+               if (likely(napi_schedule_prep(&rx_ring->napi))) {
+                       rx_ring->int_disable(rx_ring);
+                       __napi_schedule(&rx_ring->napi);
                }
        }
-       if (priv->irq0_stat &
-                       (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
-               struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
 
-               if (likely(napi_schedule_prep(&ring->napi))) {
-                       ring->int_disable(priv, ring);
-                       __napi_schedule(&ring->napi);
+       if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+               tx_ring = &priv->tx_rings[DESC_INDEX];
+
+               if (likely(napi_schedule_prep(&tx_ring->napi))) {
+                       tx_ring->int_disable(tx_ring);
+                       __napi_schedule(&tx_ring->napi);
                }
        }
+
        if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
                                UMAC_IRQ_PHY_DET_F |
-                               UMAC_IRQ_LINK_UP |
-                               UMAC_IRQ_LINK_DOWN |
+                               UMAC_IRQ_LINK_EVENT |
                                UMAC_IRQ_HFB_SM |
                                UMAC_IRQ_HFB_MM |
                                UMAC_IRQ_MPD_R)) {
@@ -2227,18 +2451,170 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
 }
 
+static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
+                                          u32 f_index)
+{
+       u32 offset;
+       u32 reg;
+
+       offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+       reg = bcmgenet_hfb_reg_readl(priv, offset);
+       return !!(reg & (1 << (f_index % 32)));
+}
+
+static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+       u32 offset;
+       u32 reg;
+
+       offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+       reg = bcmgenet_hfb_reg_readl(priv, offset);
+       reg |= (1 << (f_index % 32));
+       bcmgenet_hfb_reg_writel(priv, reg, offset);
+}
+
+static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
+                                                    u32 f_index, u32 rx_queue)
+{
+       u32 offset;
+       u32 reg;
+
+       offset = f_index / 8;
+       reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
+       reg &= ~(0xF << (4 * (f_index % 8)));
+       reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
+       bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
+}
+
+static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
+                                          u32 f_index, u32 f_length)
+{
+       u32 offset;
+       u32 reg;
+
+       offset = HFB_FLT_LEN_V3PLUS +
+                ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
+                sizeof(u32);
+       reg = bcmgenet_hfb_reg_readl(priv, offset);
+       reg &= ~(0xFF << (8 * (f_index % 4)));
+       reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
+       bcmgenet_hfb_reg_writel(priv, reg, offset);
+}
+
+static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
+{
+       u32 f_index;
+
+       for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
+               if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
+                       return f_index;
+
+       return -ENOMEM;
+}
+
+/* bcmgenet_hfb_add_filter
+ *
+ * Add new filter to Hardware Filter Block to match and direct Rx traffic to
+ * desired Rx queue.
+ *
+ * f_data is an array of unsigned 32-bit integers where each 32-bit integer
+ * provides filter data for 2 bytes (4 nibbles) of Rx frame:
+ *
+ * bits 31:20 - unused
+ * bit  19    - nibble 0 match enable
+ * bit  18    - nibble 1 match enable
+ * bit  17    - nibble 2 match enable
+ * bit  16    - nibble 3 match enable
+ * bits 15:12 - nibble 0 data
+ * bits 11:8  - nibble 1 data
+ * bits 7:4   - nibble 2 data
+ * bits 3:0   - nibble 3 data
+ *
+ * Example:
+ * In order to match:
+ * - Ethernet frame type = 0x0800 (IP)
+ * - IP version field = 4
+ * - IP protocol field = 0x11 (UDP)
+ *
+ * The following filter is needed:
+ * u32 hfb_filter_ipv4_udp[] = {
+ *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
+ *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
+ * };
+ *
+ * To add the filter to HFB and direct the traffic to Rx queue 0, call:
+ * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
+ *                         ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
+ */
+int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
+                           u32 f_length, u32 rx_queue)
+{
+       int f_index;
+       u32 i;
+
+       f_index = bcmgenet_hfb_find_unused_filter(priv);
+       if (f_index < 0)
+               return -ENOMEM;
+
+       if (f_length > priv->hw_params->hfb_filter_size)
+               return -EINVAL;
+
+       for (i = 0; i < f_length; i++)
+               bcmgenet_hfb_writel(priv, f_data[i],
+                       (f_index * priv->hw_params->hfb_filter_size + i) *
+                       sizeof(u32));
+
+       bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
+       bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
+       bcmgenet_hfb_enable_filter(priv, f_index);
+       bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
+
+       return 0;
+}
+
+/* bcmgenet_hfb_clear
+ *
+ * Clear Hardware Filter Block and disable all filtering.
+ */
+static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
+{
+       u32 i;
+
+       bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
+       bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
+       bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
+
+       for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+               bcmgenet_rdma_writel(priv, 0x0, i);
+
+       for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
+               bcmgenet_hfb_reg_writel(priv, 0x0,
+                                       HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+
+       for (i = 0; i < priv->hw_params->hfb_filter_cnt *
+                       priv->hw_params->hfb_filter_size; i++)
+               bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
+}
+
+static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
+{
+       if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+               return;
+
+       bcmgenet_hfb_clear(priv);
+}
+
 static void bcmgenet_netif_start(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
 
        /* Start the network engine */
-       napi_enable(&priv->napi);
+       bcmgenet_enable_rx_napi(priv);
+       bcmgenet_enable_tx_napi(priv);
 
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
 
-       if (phy_is_internal(priv->phydev))
-               bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
-
        netif_tx_start_all_queues(dev);
 
        phy_start(priv->phydev);
@@ -2257,6 +2633,12 @@ static int bcmgenet_open(struct net_device *dev)
        if (!IS_ERR(priv->clk))
                clk_prepare_enable(priv->clk);
 
+       /* If this is an internal GPHY, power it back on now, before UniMAC is
+        * brought out of reset as absolutely no UniMAC activity is allowed
+        */
+       if (phy_is_internal(priv->phydev))
+               bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
        /* take MAC out of reset */
        bcmgenet_umac_reset(priv);
 
@@ -2286,12 +2668,15 @@ static int bcmgenet_open(struct net_device *dev)
        ret = bcmgenet_init_dma(priv);
        if (ret) {
                netdev_err(dev, "failed to initialize DMA\n");
-               goto err_fini_dma;
+               goto err_clk_disable;
        }
 
        /* Always enable ring 16 - descriptor ring */
        bcmgenet_enable_dma(priv, dma_ctrl);
 
+       /* HFB init */
+       bcmgenet_hfb_init(priv);
+
        ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
                          dev->name, priv);
        if (ret < 0) {
@@ -2331,10 +2716,10 @@ static void bcmgenet_netif_stop(struct net_device *dev)
        struct bcmgenet_priv *priv = netdev_priv(dev);
 
        netif_tx_stop_all_queues(dev);
-       napi_disable(&priv->napi);
        phy_stop(priv->phydev);
-
        bcmgenet_intr_disable(priv);
+       bcmgenet_disable_rx_napi(priv);
+       bcmgenet_disable_tx_napi(priv);
 
        /* Wait for pending work items to complete. Since interrupts are
         * disabled no new work will be scheduled.
@@ -2377,12 +2762,12 @@ static int bcmgenet_close(struct net_device *dev)
        free_irq(priv->irq1, priv);
 
        if (phy_is_internal(priv->phydev))
-               bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+               ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 
        if (!IS_ERR(priv->clk))
                clk_disable_unprepare(priv->clk);
 
-       return 0;
+       return ret;
 }
 
 static void bcmgenet_timeout(struct net_device *dev)
@@ -2499,8 +2884,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
 static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
        [GENET_V1] = {
                .tx_queues = 0,
+               .tx_bds_per_q = 0,
                .rx_queues = 0,
-               .bds_cnt = 0,
+               .rx_bds_per_q = 0,
                .bp_in_en_shift = 16,
                .bp_in_mask = 0xffff,
                .hfb_filter_cnt = 16,
@@ -2512,8 +2898,9 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
        },
        [GENET_V2] = {
                .tx_queues = 4,
-               .rx_queues = 4,
-               .bds_cnt = 32,
+               .tx_bds_per_q = 32,
+               .rx_queues = 0,
+               .rx_bds_per_q = 0,
                .bp_in_en_shift = 16,
                .bp_in_mask = 0xffff,
                .hfb_filter_cnt = 16,
@@ -2528,11 +2915,13 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
        },
        [GENET_V3] = {
                .tx_queues = 4,
-               .rx_queues = 4,
-               .bds_cnt = 32,
+               .tx_bds_per_q = 32,
+               .rx_queues = 0,
+               .rx_bds_per_q = 0,
                .bp_in_en_shift = 17,
                .bp_in_mask = 0x1ffff,
                .hfb_filter_cnt = 48,
+               .hfb_filter_size = 128,
                .qtag_mask = 0x3F,
                .tbuf_offset = 0x0600,
                .hfb_offset = 0x8000,
@@ -2540,15 +2929,18 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
                .rdma_offset = 0x10000,
                .tdma_offset = 0x11000,
                .words_per_bd = 2,
-               .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+               .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
+                        GENET_HAS_MOCA_LINK_DET,
        },
        [GENET_V4] = {
                .tx_queues = 4,
-               .rx_queues = 4,
-               .bds_cnt = 32,
+               .tx_bds_per_q = 32,
+               .rx_queues = 0,
+               .rx_bds_per_q = 0,
                .bp_in_en_shift = 17,
                .bp_in_mask = 0x1ffff,
                .hfb_filter_cnt = 48,
+               .hfb_filter_size = 128,
                .qtag_mask = 0x3F,
                .tbuf_offset = 0x0600,
                .hfb_offset = 0x8000,
@@ -2556,7 +2948,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
                .rdma_offset = 0x2000,
                .tdma_offset = 0x4000,
                .words_per_bd = 3,
-               .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+               .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+                        GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
        },
 };
 
@@ -2645,14 +3038,15 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 #endif
 
        pr_debug("Configuration for version: %d\n"
-               "TXq: %1d, RXq: %1d, BDs: %1d\n"
+               "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
                "BP << en: %2d, BP msk: 0x%05x\n"
                "HFB count: %2d, QTAQ msk: 0x%05x\n"
                "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
                "RDMA: 0x%05x, TDMA: 0x%05x\n"
                "Words/BD: %d\n",
                priv->version,
-               params->tx_queues, params->rx_queues, params->bds_cnt,
+               params->tx_queues, params->tx_bds_per_q,
+               params->rx_queues, params->rx_bds_per_q,
                params->bp_in_en_shift, params->bp_in_mask,
                params->hfb_filter_cnt, params->qtag_mask,
                params->tbuf_offset, params->hfb_offset,
@@ -2680,8 +3074,9 @@ static int bcmgenet_probe(struct platform_device *pdev)
        struct resource *r;
        int err = -EIO;
 
-       /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
-       dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
+       /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
+       dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
+                                GENET_MAX_MQ_CNT + 1);
        if (!dev) {
                dev_err(&pdev->dev, "can't allocate net device\n");
                return -ENOMEM;
@@ -2727,7 +3122,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
        dev->watchdog_timeo = 2 * HZ;
        dev->ethtool_ops = &bcmgenet_ethtool_ops;
        dev->netdev_ops = &bcmgenet_netdev_ops;
-       netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
 
        priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
 
@@ -2860,14 +3254,16 @@ static int bcmgenet_suspend(struct device *d)
 
        /* Prepare the device for Wake-on-LAN and switch to the slow clock */
        if (device_may_wakeup(d) && priv->wolopts) {
-               bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+               ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
                clk_prepare_enable(priv->clk_wol);
+       } else if (phy_is_internal(priv->phydev)) {
+               ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
        }
 
        /* Turn off the clocks */
        clk_disable_unprepare(priv->clk);
 
-       return 0;
+       return ret;
 }
 
 static int bcmgenet_resume(struct device *d)
@@ -2886,6 +3282,12 @@ static int bcmgenet_resume(struct device *d)
        if (ret)
                return ret;
 
+       /* If this is an internal GPHY, power it back on now, before UniMAC is
+        * brought out of reset as absolutely no UniMAC activity is allowed
+        */
+       if (phy_is_internal(priv->phydev))
+               bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
        bcmgenet_umac_reset(priv);
 
        ret = init_umac(priv);
index 0d370d168aee0ea24924fbc3afc011f3a8697841..6f2887a5e0be693d625b6328349a2ad3b66d19ba 100644 (file)
@@ -293,6 +293,7 @@ struct bcmgenet_mib_counters {
 #define UMAC_IRQ_PHY_DET_F             (1 << 3)
 #define UMAC_IRQ_LINK_UP               (1 << 4)
 #define UMAC_IRQ_LINK_DOWN             (1 << 5)
+#define UMAC_IRQ_LINK_EVENT            (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN)
 #define UMAC_IRQ_UMAC                  (1 << 6)
 #define UMAC_IRQ_UMAC_TSV              (1 << 7)
 #define UMAC_IRQ_TBUF_UNDERRUN         (1 << 8)
@@ -303,13 +304,22 @@ struct bcmgenet_mib_counters {
 #define UMAC_IRQ_RXDMA_MBDONE          (1 << 13)
 #define UMAC_IRQ_RXDMA_PDONE           (1 << 14)
 #define UMAC_IRQ_RXDMA_BDONE           (1 << 15)
+#define UMAC_IRQ_RXDMA_DONE            (UMAC_IRQ_RXDMA_PDONE | \
+                                        UMAC_IRQ_RXDMA_BDONE)
 #define UMAC_IRQ_TXDMA_MBDONE          (1 << 16)
 #define UMAC_IRQ_TXDMA_PDONE           (1 << 17)
 #define UMAC_IRQ_TXDMA_BDONE           (1 << 18)
+#define UMAC_IRQ_TXDMA_DONE            (UMAC_IRQ_TXDMA_PDONE | \
+                                        UMAC_IRQ_TXDMA_BDONE)
 /* Only valid for GENETv3+ */
 #define UMAC_IRQ_MDIO_DONE             (1 << 23)
 #define UMAC_IRQ_MDIO_ERROR            (1 << 24)
 
+/* INTRL2 instance 1 definitions */
+#define UMAC_IRQ1_TX_INTR_MASK         0xFFFF
+#define UMAC_IRQ1_RX_INTR_MASK         0xFFFF
+#define UMAC_IRQ1_RX_INTR_SHIFT                16
+
 /* Register block offsets */
 #define GENET_SYS_OFF                  0x0000
 #define GENET_GR_BRIDGE_OFF            0x0040
@@ -354,6 +364,7 @@ struct bcmgenet_mib_counters {
 #define EXT_GPHY_CTRL                  0x1C
 #define  EXT_CFG_IDDQ_BIAS             (1 << 0)
 #define  EXT_CFG_PWR_DOWN              (1 << 1)
+#define  EXT_CK25_DIS                  (1 << 4)
 #define  EXT_GPHY_RESET                        (1 << 5)
 
 /* DMA rings size */
@@ -497,17 +508,20 @@ enum bcmgenet_version {
 #define GENET_HAS_40BITS       (1 << 0)
 #define GENET_HAS_EXT          (1 << 1)
 #define GENET_HAS_MDIO_INTR    (1 << 2)
+#define GENET_HAS_MOCA_LINK_DET        (1 << 3)
 
 /* BCMGENET hardware parameters, keep this structure nicely aligned
  * since it is going to be used in hot paths
  */
 struct bcmgenet_hw_params {
        u8              tx_queues;
+       u8              tx_bds_per_q;
        u8              rx_queues;
-       u8              bds_cnt;
+       u8              rx_bds_per_q;
        u8              bp_in_en_shift;
        u32             bp_in_mask;
        u8              hfb_filter_cnt;
+       u8              hfb_filter_size;
        u8              qtag_mask;
        u16             tbuf_offset;
        u32             hfb_offset;
@@ -525,16 +539,30 @@ struct bcmgenet_tx_ring {
        unsigned int    queue;          /* queue index */
        struct enet_cb  *cbs;           /* tx ring buffer control block*/
        unsigned int    size;           /* size of each tx ring */
+       unsigned int    clean_ptr;      /* Tx ring clean pointer */
        unsigned int    c_index;        /* last consumer index of each ring*/
        unsigned int    free_bds;       /* # of free bds for each ring */
        unsigned int    write_ptr;      /* Tx ring write pointer SW copy */
        unsigned int    prod_index;     /* Tx ring producer index SW copy */
        unsigned int    cb_ptr;         /* Tx ring initial CB ptr */
        unsigned int    end_ptr;        /* Tx ring end CB ptr */
-       void (*int_enable)(struct bcmgenet_priv *priv,
-                          struct bcmgenet_tx_ring *);
-       void (*int_disable)(struct bcmgenet_priv *priv,
-                           struct bcmgenet_tx_ring *);
+       void (*int_enable)(struct bcmgenet_tx_ring *);
+       void (*int_disable)(struct bcmgenet_tx_ring *);
+       struct bcmgenet_priv *priv;
+};
+
+struct bcmgenet_rx_ring {
+       struct napi_struct napi;        /* Rx NAPI struct */
+       unsigned int    index;          /* Rx ring index */
+       struct enet_cb  *cbs;           /* Rx ring buffer control block */
+       unsigned int    size;           /* Rx ring size */
+       unsigned int    c_index;        /* Rx last consumer index */
+       unsigned int    read_ptr;       /* Rx ring read pointer */
+       unsigned int    cb_ptr;         /* Rx ring initial CB ptr */
+       unsigned int    end_ptr;        /* Rx ring end CB ptr */
+       unsigned int    old_discards;
+       void (*int_enable)(struct bcmgenet_rx_ring *);
+       void (*int_disable)(struct bcmgenet_rx_ring *);
        struct bcmgenet_priv *priv;
 };
 
@@ -543,11 +571,6 @@ struct bcmgenet_priv {
        void __iomem *base;
        enum bcmgenet_version version;
        struct net_device *dev;
-       u32 int0_mask;
-       u32 int1_mask;
-
-       /* NAPI for descriptor based rx */
-       struct napi_struct napi ____cacheline_aligned;
 
        /* transmit variables */
        void __iomem *tx_bds;
@@ -558,13 +581,11 @@ struct bcmgenet_priv {
 
        /* receive variables */
        void __iomem *rx_bds;
-       void __iomem *rx_bd_assign_ptr;
-       int rx_bd_assign_index;
        struct enet_cb *rx_cbs;
        unsigned int num_rx_bds;
        unsigned int rx_buf_len;
-       unsigned int rx_read_ptr;
-       unsigned int rx_c_index;
+
+       struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
 
        /* other misc variables */
        struct bcmgenet_hw_params *hw_params;
@@ -651,6 +672,7 @@ int bcmgenet_mii_init(struct net_device *dev);
 int bcmgenet_mii_config(struct net_device *dev, bool init);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_mii_reset(struct net_device *dev);
+void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
 void bcmgenet_mii_setup(struct net_device *dev);
 
 /* Wake-on-LAN routines */
index 446889cc3c6a207ebe4ef8a55ccee6a556e2fae4..e7651b3c6c5767f7609115ef0430c13aac8d17a9 100644 (file)
@@ -168,7 +168,7 @@ void bcmgenet_mii_reset(struct net_device *dev)
        }
 }
 
-static void bcmgenet_ephy_power_up(struct net_device *dev)
+void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        u32 reg = 0;
@@ -178,14 +178,25 @@ static void bcmgenet_ephy_power_up(struct net_device *dev)
                return;
 
        reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
-       reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
-       reg |= EXT_GPHY_RESET;
-       bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
-       mdelay(2);
+       if (enable) {
+               reg &= ~EXT_CK25_DIS;
+               bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+               mdelay(1);
+
+               reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+               reg |= EXT_GPHY_RESET;
+               bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+               mdelay(1);
 
-       reg &= ~EXT_GPHY_RESET;
+               reg &= ~EXT_GPHY_RESET;
+       } else {
+               reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET;
+               bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+               mdelay(1);
+               reg |= EXT_CK25_DIS;
+       }
        bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
-       udelay(20);
+       udelay(60);
 }
 
 static void bcmgenet_internal_phy_setup(struct net_device *dev)
@@ -193,8 +204,8 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
        struct bcmgenet_priv *priv = netdev_priv(dev);
        u32 reg;
 
-       /* Power up EPHY */
-       bcmgenet_ephy_power_up(dev);
+       /* Power up PHY */
+       bcmgenet_phy_power_set(dev, true);
        /* enable APD */
        reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
        reg |= EXT_PWR_DN_EN_LD;
@@ -451,6 +462,15 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
        return 0;
 }
 
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+                                         struct fixed_phy_status *status)
+{
+       if (dev && dev->phydev && status)
+               status->link = dev->phydev->link;
+
+       return 0;
+}
+
 static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
 {
        struct device *kdev = &priv->pdev->dev;
@@ -502,6 +522,13 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
                        dev_err(kdev, "failed to register fixed PHY device\n");
                        return -ENODEV;
                }
+
+               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
+                       ret = fixed_phy_set_link_update(
+                               phydev, bcmgenet_fixed_phy_link_update);
+                       if (!ret)
+                               phydev->link = 0;
+               }
        }
 
        priv->phydev = phydev;
index 23a019cee279af1e502d05dc5f2363372d35b2ce..1270b189a9a2ffd7776985f8e0a96fada80f8de8 100644 (file)
@@ -6217,10 +6217,9 @@ static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return 0;
 }
 
-static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        u64 ns;
-       u32 remainder;
        struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 
        tg3_full_lock(tp, 0);
@@ -6228,19 +6227,18 @@ static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
        ns += tp->ptp_adjust;
        tg3_full_unlock(tp);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
 
        return 0;
 }
 
 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
-                          const struct timespec *ts)
+                          const struct timespec64 *ts)
 {
        u64 ns;
        struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 
-       ns = timespec_to_ns(ts);
+       ns = timespec64_to_ns(ts);
 
        tg3_full_lock(tp, 0);
        tg3_refclk_write(tp, ns);
@@ -6320,8 +6318,8 @@ static const struct ptp_clock_info tg3_ptp_caps = {
        .pps            = 0,
        .adjfreq        = tg3_ptp_adjfreq,
        .adjtime        = tg3_ptp_adjtime,
-       .gettime        = tg3_ptp_gettime,
-       .settime        = tg3_ptp_settime,
+       .gettime64      = tg3_ptp_gettime,
+       .settime64      = tg3_ptp_settime,
        .enable         = tg3_ptp_enable,
 };
 
@@ -7244,7 +7242,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
                        if (tnapi == &tp->napi[1] && tp->rx_refill)
                                continue;
 
-                       napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        /* Reenable interrupts. */
                        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
 
@@ -7337,7 +7335,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
                        sblk->status &= ~SD_STATUS_UPDATED;
 
                if (likely(!tg3_has_work(tnapi))) {
-                       napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        tg3_int_reenable(tnapi);
                        break;
                }
index 2641557788575e6fec7ed26a2161e35a5eedf9ab..4e8c0b6c57d02eedb02330322455633cecbd7146 100644 (file)
@@ -1,9 +1,9 @@
 #
-# Brocade device configuration
+# QLogic BR-series device configuration
 #
 
 config NET_VENDOR_BROCADE
-       bool "Brocade devices"
+       bool "QLogic BR-series devices"
        default y
        depends on PCI
        ---help---
@@ -13,8 +13,8 @@ config NET_VENDOR_BROCADE
 
          Note that the answer to this question doesn't directly affect the
          kernel: saying N will just cause the configurator to skip all
-         the questions about Brocade cards. If you say Y, you will be asked for
-         your specific card in the following questions.
+         the questions about QLogic BR-series cards. If you say Y, you will be
+         asked for your specific card in the following questions.
 
 if NET_VENDOR_BROCADE
 
index b58238d2df6a1a92cd0e6801aa9d0c1ae56ab3b3..fec10f9b45582246afa35dfc1049fef243aaebf8 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Makefile for the Brocade device drivers.
+# Makefile for the QLogic BR-series device drivers.
 #
 
 obj-$(CONFIG_BNA) += bna/
index dc2eb526fbf7d4264a7996212de725a9577dd4ec..fe01279a8843cedbd703f3b05657026ab686ceac 100644 (file)
@@ -1,17 +1,17 @@
 #
-# Brocade network device configuration
+# QLogic BR-series network device configuration
 #
 
 config BNA
-       tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+       tristate "QLogic BR-series 1010/1020/1860 10Gb Ethernet Driver support"
        depends on PCI
        ---help---
-         This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
-         cards.
+         This driver supports QLogic BR-series 1010/1020/1860 10Gb CEE capable
+         Ethernet cards.
          To compile this driver as a module, choose M here: the module
          will be called bna.
 
-         For general information and support, go to the Brocade support
+         For general information and support, go to the QLogic support
          website at:
 
-         <http://support.brocade.com>
+         <http://support.qlogic.com>
index 6027302ae73aca095e29e6ee44e7808aa38cc973..6e10b99733a230560ff8e6e218323834735c9a0a 100644 (file)
@@ -1,5 +1,6 @@
 #
-# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+# Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+# Copyright (c) 2014-2015 QLogic Corporation.
 # All rights reserved.
 #
 
index 550d2521ba76a4c32b68beced3199f53ef36e8f0..cf9f3956f198f3332a30f9cd397a2d5ebc7bfc4b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 #include "bfa_cee.h"
index 93fde633d6f33762eae9691610ae27e17872f705..d04eef5d5a770eb7b479c11a5ab5a940d5cc27b1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 #ifndef __BFA_CEE_H__
index ad004a4c3897ecb3925c47da387a9985a13c51d4..af25d8e8fae01f1ea54f428f87ac796cf08bb50f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 /* BFA common services */
index b7d8127c198f7ef92f6e9f74201041464da3f64b..3bfd9da92630edb450ad586798f36a76bb48335b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 #ifndef __BFA_DEFS_H__
index b39c5f23974b0bbdeec349d097e5be0b48147a47..a37326d44fbb3c9fc41852e6d177fad2ec85f755 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #ifndef __BFA_DEFS_CNA_H__
 #define __BFA_DEFS_CNA_H__
@@ -134,7 +135,7 @@ struct bfa_cee_lldp_str {
        u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
 };
 
-/* LLDP paramters */
+/* LLDP parameters */
 struct bfa_cee_lldp_cfg {
        struct bfa_cee_lldp_str chassis_id;
        struct bfa_cee_lldp_str port_id;
index 7fb396fe679d4daf1e2b7dc616a87f6dc8924385..7a45cd0b594d71adf1e1eb72bd482567b2a041c8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #ifndef __BFA_DEFS_MFG_COMM_H__
 #define __BFA_DEFS_MFG_COMM_H__
index ea9af9ae754d244d7bf26b65889b1cf4d88424a0..a43b56002752fd25627deffe147bb63ff590bfa9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #ifndef __BFA_DEFS_STATUS_H__
 #define __BFA_DEFS_STATUS_H__
index 354ae9792badb329e89b0ab37f59ff78c73efd11..594a2ab36d3175de2633490eec1e0395dbb74e59 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 #include "bfa_ioc.h"
@@ -1339,7 +1340,7 @@ bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
        return true;
 }
 
-/* Returns TRUE if major minor and maintainence are same.
+/* Returns TRUE if major minor and maintenance are same.
  * If patch version are same, check for MD5 Checksum to be same.
  */
 static bool
@@ -2763,7 +2764,7 @@ bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
        list_add_tail(&notify->qe, &ioc->notify_q);
 }
 
-#define BFA_MFG_NAME "Brocade"
+#define BFA_MFG_NAME "QLogic"
 static void
 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
                         struct bfa_adapter_attr *ad_attr)
index 20cff7df4b55f726df624d6b43dccc347f0aad2e..effb7156e7a4799d606627340312e00b5bef9617 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 #ifndef __BFA_IOC_H__
index d639558455cb1a68c97c9a8f3cc84b577e2e0a9c..2e72445dbb4f661748d0e90597889c2e92fc5b8d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 #include "bfa_ioc.h"
@@ -698,7 +699,7 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb)
 
        /*
         * Ignore mode and program for the max clock (which is FC16)
-        * Firmware/NFC will do the PLL init appropiately
+        * Firmware/NFC will do the PLL init appropriately
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
index 55067d0d25cfd3ab0ab541e3c7ac94318cfd3498..c07d5b9372f46af0e80a4e478fa432b959c384cf 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 /* MSGQ module source file. */
index a6a565a366dca5401616e38ffc2702ccb6500a5d..66bc8b5acd57d55160d8dac9786b3873167b215a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 #ifndef __BFA_MSGQ_H__
index 8c563a77cdf6a237c02bb599f27eada95bb61dde..2bcde4042268add6440deaca93a3255740e54a16 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #ifndef __BFI_H__
 #define __BFI_H__
@@ -158,8 +159,8 @@ enum bfi_asic_gen {
 };
 
 enum bfi_asic_mode {
-       BFI_ASIC_MODE_FC        = 1,    /* FC upto 8G speed             */
-       BFI_ASIC_MODE_FC16      = 2,    /* FC upto 16G speed            */
+       BFI_ASIC_MODE_FC        = 1,    /* FC up to 8G speed            */
+       BFI_ASIC_MODE_FC16      = 2,    /* FC up to 16G speed           */
        BFI_ASIC_MODE_ETH       = 3,    /* Ethernet ports               */
        BFI_ASIC_MODE_COMBO     = 4,    /* FC 16G and Ethernet 10G port */
 };
index 6704a4392973b2721adcc771c004de46826c12c5..bd605bee72eea83de3c785d822a25216f4b2eb16 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #ifndef __BFI_CNA_H__
 #define __BFI_CNA_H__
index ae072dc5d238eeba86671d1d8e36aacd6542887c..bccca3bbadb85c4edacb2f6e439a35e4541d64d7 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 /* BNA Hardware and Firmware Interface */
index c49fa312ddbd61225c439be655bbe93cdd2afaa3..2835b51eabecca707507886954d503f461502e86 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 /*
- * bfi_reg.h ASIC register defines for all Brocade adapter ASICs
+ * bfi_reg.h ASIC register defines for all QLogic BR-series adapter ASICs
  */
 
 #ifndef __BFI_REG_H__
@@ -221,7 +222,7 @@ enum {
 #define __PMM_1T_RESET_P               0x00000001
 #define PMM_1T_RESET_REG_P1            0x00023c1c
 
-/* Brocade 1860 Adapter specific defines */
+/* QLogic BR-series 1860 Adapter specific defines */
 #define CT2_PCI_CPQ_BASE               0x00030000
 #define CT2_PCI_APP_BASE               0x00030100
 #define CT2_PCI_ETH_BASE               0x00030400
@@ -264,7 +265,7 @@ enum {
 #define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR      (CT2_PCI_APP_BASE + 0x38)
 
 /*
- * Brocade 1860 adapter CPQ block registers
+ * QLogic BR-series 1860 adapter CPQ block registers
  */
 #define CT2_HOSTFN_LPU0_MBOX0          (CT2_PCI_CPQ_BASE + 0x00)
 #define CT2_HOSTFN_LPU1_MBOX0          (CT2_PCI_CPQ_BASE + 0x20)
index 1f512190d696dca54919ac7896e0cbb82441e86f..8ba72b1f36d9f249883b48f07727d4779f83399c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #ifndef __BNA_H__
 #define __BNA_H__
index 903466ef41c06ed4f9812b754f245f1c32644b2f..deb8da6ab9cc4a4fcb434a8d8d819203b397454f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #include "bna.h"
 
index 2702d02e98d99632c90ff87a70bf243cbed4577d..174af0e9d05611fa40a54a6a774aa1a21cfa1ab1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 /* File for interrupt macros and functions */
@@ -362,7 +363,7 @@ struct bna_txq_wi_vector {
 
 /*  TxQ Entry Structure
  *
- *  BEWARE:  Load values into this structure with correct endianess.
+ *  BEWARE:  Load values into this structure with correct endianness.
  */
 struct bna_txq_entry {
        union {
index 5fac411c52f43ef4c8365a306b216e002c2aee0d..8ab3a5f62706c1463bcc86ab54d3a5ad58afca93 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
   */
 /*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #include "bna.h"
 #include "bfi.h"
index 621547cd3504323d0b66944b9580f0d291fa09d0..d0a7a566f5d656a58febc0cdbfdf0fc8ffd92e1c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #ifndef __BNA_TYPES_H__
 #define __BNA_TYPES_H__
index 7714d7790089cc1c35aa37511d07ccce7d27dfa3..37072a83f9d6d0afb29de683051e13af94a78fd8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #include <linux/bitops.h>
 #include <linux/netdevice.h>
@@ -3867,7 +3868,7 @@ bnad_module_init(void)
 {
        int err;
 
-       pr_info("Brocade 10G Ethernet driver - version: %s\n",
+       pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n",
                        BNAD_VERSION);
 
        bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
@@ -3894,7 +3895,7 @@ module_exit(bnad_module_exit);
 
 MODULE_AUTHOR("Brocade");
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
 MODULE_VERSION(BNAD_VERSION);
 MODULE_FIRMWARE(CNA_FW_FILE_CT);
 MODULE_FIRMWARE(CNA_FW_FILE_CT2);
index 2842c188e0da52c65493e39358dd41698720e90e..7ead6c23edb65ce2d9fa9351b02e3ced609bbe30 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #ifndef __BNAD_H__
 #define __BNAD_H__
@@ -71,7 +72,7 @@ struct bnad_rx_ctrl {
 #define BNAD_NAME                      "bna"
 #define BNAD_NAME_LEN                  64
 
-#define BNAD_VERSION                   "3.2.23.0"
+#define BNAD_VERSION                   "3.2.25.1"
 
 #define BNAD_MAILBOX_MSIX_INDEX                0
 #define BNAD_MAILBOX_MSIX_VECTORS      1
index 619083a860a4b4bfc64518619202aa515c415b71..72c89550417c71f5f67b2c1b7c4c8fa2afe7b475 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 #include <linux/debugfs.h>
index d26adac6ab99e8eb91e90ddded98536493b04597..12f344debd1c4f885efdeeea55d693e7fc790be2 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 #include "cna.h"
index b3ff6d507951bfd99ce559e91607ac0a5402e4b1..28e7d0ffeab18e427e5bae07d41527fc7ce1648d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2006-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 
 #ifndef __CNA_H__
@@ -37,8 +38,8 @@
 
 extern char bfa_version[];
 
-#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin"
-#define CNA_FW_FILE_CT2        "ct2fw-3.2.3.0.bin"
+#define CNA_FW_FILE_CT "ctfw-3.2.5.1.bin"
+#define CNA_FW_FILE_CT2        "ct2fw-3.2.5.1.bin"
 #define FC_SYMNAME_MAX 256     /*!< max name server symbolic name size */
 
 #pragma pack(1)
index 6f72771caea66a6de0d3380b6edcc83cccbd7fbf..ebf462d8082f79373c1ea234e4f3034a16c53e73 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License (GPL) Version 2 as
  * General Public License for more details.
  */
 /*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
  * All rights reserved
- * www.brocade.com
+ * www.qlogic.com
  */
 #include <linux/firmware.h>
 #include "bnad.h"
index 321d2ad235d9143547efdb9d3100b8d91fd45d68..1ba3e3a67389d8a793fb7fbc6ed8a431586597e5 100644 (file)
@@ -4,7 +4,7 @@
 
 config NET_CADENCE
        bool "Cadence devices"
-       depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
+       depends on HAS_IOMEM
        default y
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
@@ -20,17 +20,9 @@ config NET_CADENCE
 
 if NET_CADENCE
 
-config ARM_AT91_ETHER
-       tristate "AT91RM9200 Ethernet support"
-       depends on HAS_DMA && (ARCH_AT91 || COMPILE_TEST)
-       select MACB
-       ---help---
-         If you wish to compile a kernel for the AT91RM9200 and enable
-         ethernet support, then you should always answer Y to this.
-
 config MACB
        tristate "Cadence MACB/GEM support"
-       depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
+       depends on HAS_DMA
        select PHYLIB
        ---help---
          The Cadence MACB ethernet interface is found on many Atmel AT32 and
index 9068b8331ed18b52f5df76b75d5f5f9ad4125cf2..91f79b1f0505d25beb9935790f47146c555024cf 100644 (file)
@@ -2,5 +2,4 @@
 # Makefile for the Atmel network device drivers.
 #
 
-obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
 obj-$(CONFIG_MACB) += macb.o
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
deleted file mode 100644 (file)
index 7ef55f5..0000000
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- * Ethernet driver for the Atmel AT91RM9200 (Thunder)
- *
- *  Copyright (C) 2003 SAN People (Pty) Ltd
- *
- * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
- * Initial version by Rick Bronson 01/11/2003
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/dma-mapping.h>
-#include <linux/ethtool.h>
-#include <linux/platform_data/macb.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/gfp.h>
-#include <linux/phy.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_net.h>
-
-#include "macb.h"
-
-/* 1518 rounded up */
-#define MAX_RBUFF_SZ   0x600
-/* max number of receive buffers */
-#define MAX_RX_DESCR   9
-
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
-{
-       struct macb *lp = netdev_priv(dev);
-       dma_addr_t addr;
-       u32 ctl;
-       int i;
-
-       lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
-                                        (MAX_RX_DESCR *
-                                         sizeof(struct macb_dma_desc)),
-                                        &lp->rx_ring_dma, GFP_KERNEL);
-       if (!lp->rx_ring)
-               return -ENOMEM;
-
-       lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
-                                           MAX_RX_DESCR * MAX_RBUFF_SZ,
-                                           &lp->rx_buffers_dma, GFP_KERNEL);
-       if (!lp->rx_buffers) {
-               dma_free_coherent(&lp->pdev->dev,
-                                 MAX_RX_DESCR * sizeof(struct macb_dma_desc),
-                                 lp->rx_ring, lp->rx_ring_dma);
-               lp->rx_ring = NULL;
-               return -ENOMEM;
-       }
-
-       addr = lp->rx_buffers_dma;
-       for (i = 0; i < MAX_RX_DESCR; i++) {
-               lp->rx_ring[i].addr = addr;
-               lp->rx_ring[i].ctrl = 0;
-               addr += MAX_RBUFF_SZ;
-       }
-
-       /* Set the Wrap bit on the last descriptor */
-       lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
-
-       /* Reset buffer index */
-       lp->rx_tail = 0;
-
-       /* Program address of descriptor list in Rx Buffer Queue register */
-       macb_writel(lp, RBQP, lp->rx_ring_dma);
-
-       /* Enable Receive and Transmit */
-       ctl = macb_readl(lp, NCR);
-       macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
-
-       return 0;
-}
-
-/* Open the ethernet interface */
-static int at91ether_open(struct net_device *dev)
-{
-       struct macb *lp = netdev_priv(dev);
-       u32 ctl;
-       int ret;
-
-       /* Clear internal statistics */
-       ctl = macb_readl(lp, NCR);
-       macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
-
-       macb_set_hwaddr(lp);
-
-       ret = at91ether_start(dev);
-       if (ret)
-               return ret;
-
-       /* Enable MAC interrupts */
-       macb_writel(lp, IER, MACB_BIT(RCOMP)    |
-                            MACB_BIT(RXUBR)    |
-                            MACB_BIT(ISR_TUND) |
-                            MACB_BIT(ISR_RLE)  |
-                            MACB_BIT(TCOMP)    |
-                            MACB_BIT(ISR_ROVR) |
-                            MACB_BIT(HRESP));
-
-       /* schedule a link state check */
-       phy_start(lp->phy_dev);
-
-       netif_start_queue(dev);
-
-       return 0;
-}
-
-/* Close the interface */
-static int at91ether_close(struct net_device *dev)
-{
-       struct macb *lp = netdev_priv(dev);
-       u32 ctl;
-
-       /* Disable Receiver and Transmitter */
-       ctl = macb_readl(lp, NCR);
-       macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
-       /* Disable MAC interrupts */
-       macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
-                            MACB_BIT(RXUBR)    |
-                            MACB_BIT(ISR_TUND) |
-                            MACB_BIT(ISR_RLE)  |
-                            MACB_BIT(TCOMP)    |
-                            MACB_BIT(ISR_ROVR) |
-                            MACB_BIT(HRESP));
-
-       netif_stop_queue(dev);
-
-       dma_free_coherent(&lp->pdev->dev,
-                               MAX_RX_DESCR * sizeof(struct macb_dma_desc),
-                               lp->rx_ring, lp->rx_ring_dma);
-       lp->rx_ring = NULL;
-
-       dma_free_coherent(&lp->pdev->dev,
-                               MAX_RX_DESCR * MAX_RBUFF_SZ,
-                               lp->rx_buffers, lp->rx_buffers_dma);
-       lp->rx_buffers = NULL;
-
-       return 0;
-}
-
-/* Transmit packet */
-static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct macb *lp = netdev_priv(dev);
-
-       if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
-               netif_stop_queue(dev);
-
-               /* Store packet information (to free when Tx completed) */
-               lp->skb = skb;
-               lp->skb_length = skb->len;
-               lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
-                                                       DMA_TO_DEVICE);
-
-               /* Set address of the data in the Transmit Address register */
-               macb_writel(lp, TAR, lp->skb_physaddr);
-               /* Set length of the packet in the Transmit Control register */
-               macb_writel(lp, TCR, skb->len);
-
-       } else {
-               netdev_err(dev, "%s called, but device is busy!\n", __func__);
-               return NETDEV_TX_BUSY;
-       }
-
-       return NETDEV_TX_OK;
-}
-
-/* Extract received frame from buffer descriptors and sent to upper layers.
- * (Called from interrupt context)
- */
-static void at91ether_rx(struct net_device *dev)
-{
-       struct macb *lp = netdev_priv(dev);
-       unsigned char *p_recv;
-       struct sk_buff *skb;
-       unsigned int pktlen;
-
-       while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
-               p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
-               pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
-               skb = netdev_alloc_skb(dev, pktlen + 2);
-               if (skb) {
-                       skb_reserve(skb, 2);
-                       memcpy(skb_put(skb, pktlen), p_recv, pktlen);
-
-                       skb->protocol = eth_type_trans(skb, dev);
-                       lp->stats.rx_packets++;
-                       lp->stats.rx_bytes += pktlen;
-                       netif_rx(skb);
-               } else {
-                       lp->stats.rx_dropped++;
-               }
-
-               if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
-                       lp->stats.multicast++;
-
-               /* reset ownership bit */
-               lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
-
-               /* wrap after last buffer */
-               if (lp->rx_tail == MAX_RX_DESCR - 1)
-                       lp->rx_tail = 0;
-               else
-                       lp->rx_tail++;
-       }
-}
-
-/* MAC interrupt handler */
-static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
-{
-       struct net_device *dev = dev_id;
-       struct macb *lp = netdev_priv(dev);
-       u32 intstatus, ctl;
-
-       /* MAC Interrupt Status register indicates what interrupts are pending.
-        * It is automatically cleared once read.
-        */
-       intstatus = macb_readl(lp, ISR);
-
-       /* Receive complete */
-       if (intstatus & MACB_BIT(RCOMP))
-               at91ether_rx(dev);
-
-       /* Transmit complete */
-       if (intstatus & MACB_BIT(TCOMP)) {
-               /* The TCOM bit is set even if the transmission failed */
-               if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
-                       lp->stats.tx_errors++;
-
-               if (lp->skb) {
-                       dev_kfree_skb_irq(lp->skb);
-                       lp->skb = NULL;
-                       dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
-                       lp->stats.tx_packets++;
-                       lp->stats.tx_bytes += lp->skb_length;
-               }
-               netif_wake_queue(dev);
-       }
-
-       /* Work-around for EMAC Errata section 41.3.1 */
-       if (intstatus & MACB_BIT(RXUBR)) {
-               ctl = macb_readl(lp, NCR);
-               macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
-               macb_writel(lp, NCR, ctl | MACB_BIT(RE));
-       }
-
-       if (intstatus & MACB_BIT(ISR_ROVR))
-               netdev_err(dev, "ROVR error\n");
-
-       return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void at91ether_poll_controller(struct net_device *dev)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       at91ether_interrupt(dev->irq, dev);
-       local_irq_restore(flags);
-}
-#endif
-
-static const struct net_device_ops at91ether_netdev_ops = {
-       .ndo_open               = at91ether_open,
-       .ndo_stop               = at91ether_close,
-       .ndo_start_xmit         = at91ether_start_xmit,
-       .ndo_get_stats          = macb_get_stats,
-       .ndo_set_rx_mode        = macb_set_rx_mode,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_do_ioctl           = macb_ioctl,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_change_mtu         = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = at91ether_poll_controller,
-#endif
-};
-
-#if defined(CONFIG_OF)
-static const struct of_device_id at91ether_dt_ids[] = {
-       { .compatible = "cdns,at91rm9200-emac" },
-       { .compatible = "cdns,emac" },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
-#endif
-
-/* Detect MAC & PHY and perform ethernet interface initialization */
-static int __init at91ether_probe(struct platform_device *pdev)
-{
-       struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
-       struct resource *regs;
-       struct net_device *dev;
-       struct phy_device *phydev;
-       struct macb *lp;
-       int res;
-       u32 reg;
-       const char *mac;
-
-       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs)
-               return -ENOENT;
-
-       dev = alloc_etherdev(sizeof(struct macb));
-       if (!dev)
-               return -ENOMEM;
-
-       lp = netdev_priv(dev);
-       lp->pdev = pdev;
-       lp->dev = dev;
-       spin_lock_init(&lp->lock);
-
-       /* physical base address */
-       dev->base_addr = regs->start;
-       lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
-       if (!lp->regs) {
-               res = -ENOMEM;
-               goto err_free_dev;
-       }
-
-       /* Clock */
-       lp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
-       if (IS_ERR(lp->pclk)) {
-               res = PTR_ERR(lp->pclk);
-               goto err_free_dev;
-       }
-       clk_prepare_enable(lp->pclk);
-
-       lp->hclk = ERR_PTR(-ENOENT);
-       lp->tx_clk = ERR_PTR(-ENOENT);
-
-       /* Install the interrupt handler */
-       dev->irq = platform_get_irq(pdev, 0);
-       res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev);
-       if (res)
-               goto err_disable_clock;
-
-       dev->netdev_ops = &at91ether_netdev_ops;
-       dev->ethtool_ops = &macb_ethtool_ops;
-       platform_set_drvdata(pdev, dev);
-       SET_NETDEV_DEV(dev, &pdev->dev);
-
-       mac = of_get_mac_address(pdev->dev.of_node);
-       if (mac)
-               memcpy(lp->dev->dev_addr, mac, ETH_ALEN);
-       else
-               macb_get_hwaddr(lp);
-
-       res = of_get_phy_mode(pdev->dev.of_node);
-       if (res < 0) {
-               if (board_data && board_data->is_rmii)
-                       lp->phy_interface = PHY_INTERFACE_MODE_RMII;
-               else
-                       lp->phy_interface = PHY_INTERFACE_MODE_MII;
-       } else {
-               lp->phy_interface = res;
-       }
-
-       macb_writel(lp, NCR, 0);
-
-       reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
-       if (lp->phy_interface == PHY_INTERFACE_MODE_RMII)
-               reg |= MACB_BIT(RM9200_RMII);
-
-       macb_writel(lp, NCFGR, reg);
-
-       /* Register the network interface */
-       res = register_netdev(dev);
-       if (res)
-               goto err_disable_clock;
-
-       res = macb_mii_init(lp);
-       if (res)
-               goto err_out_unregister_netdev;
-
-       /* will be enabled in open() */
-       netif_carrier_off(dev);
-
-       phydev = lp->phy_dev;
-       netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
-                               phydev->drv->name, dev_name(&phydev->dev),
-                               phydev->irq);
-
-       /* Display ethernet banner */
-       netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n",
-                               dev->base_addr, dev->irq, dev->dev_addr);
-
-       return 0;
-
-err_out_unregister_netdev:
-       unregister_netdev(dev);
-err_disable_clock:
-       clk_disable_unprepare(lp->pclk);
-err_free_dev:
-       free_netdev(dev);
-       return res;
-}
-
-static int at91ether_remove(struct platform_device *pdev)
-{
-       struct net_device *dev = platform_get_drvdata(pdev);
-       struct macb *lp = netdev_priv(dev);
-
-       if (lp->phy_dev)
-               phy_disconnect(lp->phy_dev);
-
-       mdiobus_unregister(lp->mii_bus);
-       kfree(lp->mii_bus->irq);
-       mdiobus_free(lp->mii_bus);
-       unregister_netdev(dev);
-       clk_disable_unprepare(lp->pclk);
-       free_netdev(dev);
-
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
-       struct net_device *net_dev = platform_get_drvdata(pdev);
-       struct macb *lp = netdev_priv(net_dev);
-
-       if (netif_running(net_dev)) {
-               netif_stop_queue(net_dev);
-               netif_device_detach(net_dev);
-
-               clk_disable_unprepare(lp->pclk);
-       }
-       return 0;
-}
-
-static int at91ether_resume(struct platform_device *pdev)
-{
-       struct net_device *net_dev = platform_get_drvdata(pdev);
-       struct macb *lp = netdev_priv(net_dev);
-
-       if (netif_running(net_dev)) {
-               clk_prepare_enable(lp->pclk);
-
-               netif_device_attach(net_dev);
-               netif_start_queue(net_dev);
-       }
-       return 0;
-}
-#else
-#define at91ether_suspend      NULL
-#define at91ether_resume       NULL
-#endif
-
-static struct platform_driver at91ether_driver = {
-       .remove         = at91ether_remove,
-       .suspend        = at91ether_suspend,
-       .resume         = at91ether_resume,
-       .driver         = {
-               .name   = "at91_ether",
-               .of_match_table = of_match_ptr(at91ether_dt_ids),
-       },
-};
-
-module_platform_driver_probe(at91ether_driver, at91ether_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
-MODULE_AUTHOR("Andrew Victor");
-MODULE_ALIAS("platform:at91_ether");
index 81d41539fcbab8e015d28e4b7729bc4812f60f0a..448a32309dd08c79c99bca7692fea10d429c1b41 100644 (file)
@@ -102,7 +102,7 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
        return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
 }
 
-void macb_set_hwaddr(struct macb *bp)
+static void macb_set_hwaddr(struct macb *bp)
 {
        u32 bottom;
        u16 top;
@@ -120,9 +120,8 @@ void macb_set_hwaddr(struct macb *bp)
        macb_or_gem_writel(bp, SA4B, 0);
        macb_or_gem_writel(bp, SA4T, 0);
 }
-EXPORT_SYMBOL_GPL(macb_set_hwaddr);
 
-void macb_get_hwaddr(struct macb *bp)
+static void macb_get_hwaddr(struct macb *bp)
 {
        struct macb_platform_data *pdata;
        u32 bottom;
@@ -162,7 +161,6 @@ void macb_get_hwaddr(struct macb *bp)
        netdev_info(bp->dev, "invalid hw address, using random\n");
        eth_hw_addr_random(bp->dev);
 }
-EXPORT_SYMBOL_GPL(macb_get_hwaddr);
 
 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
@@ -213,6 +211,9 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
 {
        long ferr, rate, rate_rounded;
 
+       if (!clk)
+               return;
+
        switch (speed) {
        case SPEED_10:
                rate = 2500000;
@@ -292,11 +293,13 @@ static void macb_handle_link_change(struct net_device *dev)
 
        spin_unlock_irqrestore(&bp->lock, flags);
 
-       if (!IS_ERR(bp->tx_clk))
-               macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
-
        if (status_change) {
                if (phydev->link) {
+                       /* Update the TX clock rate if and only if the link is
+                        * up and there has been a link change.
+                        */
+                       macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+
                        netif_carrier_on(dev);
                        netdev_info(dev, "link up (%d/%s)\n",
                                    phydev->speed,
@@ -357,7 +360,7 @@ static int macb_mii_probe(struct net_device *dev)
        return 0;
 }
 
-int macb_mii_init(struct macb *bp)
+static int macb_mii_init(struct macb *bp)
 {
        struct macb_platform_data *pdata;
        struct device_node *np;
@@ -438,7 +441,6 @@ err_out_free_mdiobus:
 err_out:
        return err;
 }
-EXPORT_SYMBOL_GPL(macb_mii_init);
 
 static void macb_update_stats(struct macb *bp)
 {
@@ -449,7 +451,7 @@ static void macb_update_stats(struct macb *bp)
        WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
 
        for(; p < end; p++, reg++)
-               *p += __raw_readl(reg);
+               *p += readl_relaxed(reg);
 }
 
 static int macb_halt_tx(struct macb *bp)
@@ -1578,6 +1580,7 @@ static u32 macb_dbw(struct macb *bp)
 static void macb_configure_dma(struct macb *bp)
 {
        u32 dmacfg;
+       u32 tmp, ncr;
 
        if (macb_is_gem(bp)) {
                dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1585,7 +1588,24 @@ static void macb_configure_dma(struct macb *bp)
                if (bp->dma_burst_length)
                        dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
                dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
-               dmacfg &= ~GEM_BIT(ENDIA);
+               dmacfg &= ~GEM_BIT(ENDIA_PKT);
+
+       /* Find the CPU endianness by using the loopback bit of net_ctrl
+        * register. Save it first. When the CPU is in big endian we
+        * need to program swapped mode for management descriptor access.
+        */
+               ncr = macb_readl(bp, NCR);
+               __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
+               tmp =  __raw_readl(bp->regs + MACB_NCR);
+
+               if (tmp == MACB_BIT(LLB))
+                       dmacfg &= ~GEM_BIT(ENDIA_DESC);
+               else
+                       dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
+
+               /* Restore net_ctrl */
+               macb_writel(bp, NCR, ncr);
+
                if (bp->dev->features & NETIF_F_HW_CSUM)
                        dmacfg |= GEM_BIT(TXCOEN);
                else
@@ -1723,7 +1743,7 @@ static void macb_sethashtable(struct net_device *dev)
 /*
  * Enable/Disable promiscuous and multicast modes.
  */
-void macb_set_rx_mode(struct net_device *dev)
+static void macb_set_rx_mode(struct net_device *dev)
 {
        unsigned long cfg;
        struct macb *bp = netdev_priv(dev);
@@ -1764,7 +1784,6 @@ void macb_set_rx_mode(struct net_device *dev)
 
        macb_writel(bp, NCFGR, cfg);
 }
-EXPORT_SYMBOL_GPL(macb_set_rx_mode);
 
 static int macb_open(struct net_device *dev)
 {
@@ -1832,14 +1851,14 @@ static void gem_update_stats(struct macb *bp)
 
        for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
                u32 offset = gem_statistics[i].offset;
-               u64 val = __raw_readl(bp->regs + offset);
+               u64 val = readl_relaxed(bp->regs + offset);
 
                bp->ethtool_stats[i] += val;
                *p += val;
 
                if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
                        /* Add GEM_OCTTXH, GEM_OCTRXH */
-                       val = __raw_readl(bp->regs + offset + 4);
+                       val = readl_relaxed(bp->regs + offset + 4);
                        bp->ethtool_stats[i] += ((u64)val) << 32;
                        *(++p) += val;
                }
@@ -1917,7 +1936,7 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
        }
 }
 
-struct net_device_stats *macb_get_stats(struct net_device *dev)
+static struct net_device_stats *macb_get_stats(struct net_device *dev)
 {
        struct macb *bp = netdev_priv(dev);
        struct net_device_stats *nstat = &bp->stats;
@@ -1963,7 +1982,6 @@ struct net_device_stats *macb_get_stats(struct net_device *dev)
 
        return nstat;
 }
-EXPORT_SYMBOL_GPL(macb_get_stats);
 
 static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
@@ -2019,13 +2037,13 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
        regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
 
+       regs_buff[12] = macb_or_gem_readl(bp, USRIO);
        if (macb_is_gem(bp)) {
-               regs_buff[12] = gem_readl(bp, USRIO);
                regs_buff[13] = gem_readl(bp, DMACFG);
        }
 }
 
-const struct ethtool_ops macb_ethtool_ops = {
+static const struct ethtool_ops macb_ethtool_ops = {
        .get_settings           = macb_get_settings,
        .set_settings           = macb_set_settings,
        .get_regs_len           = macb_get_regs_len,
@@ -2033,7 +2051,6 @@ const struct ethtool_ops macb_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_ts_info            = ethtool_op_get_ts_info,
 };
-EXPORT_SYMBOL_GPL(macb_ethtool_ops);
 
 static const struct ethtool_ops gem_ethtool_ops = {
        .get_settings           = macb_get_settings,
@@ -2047,7 +2064,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
        .get_sset_count         = gem_get_sset_count,
 };
 
-int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct macb *bp = netdev_priv(dev);
        struct phy_device *phydev = bp->phy_dev;
@@ -2060,7 +2077,6 @@ int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
        return phy_mii_ioctl(phydev, rq, cmd);
 }
-EXPORT_SYMBOL_GPL(macb_ioctl);
 
 static int macb_set_features(struct net_device *netdev,
                             netdev_features_t features)
@@ -2112,63 +2128,20 @@ static const struct net_device_ops macb_netdev_ops = {
        .ndo_set_features       = macb_set_features,
 };
 
-#if defined(CONFIG_OF)
-static const struct macb_config pc302gem_config = {
-       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
-       .dma_burst_length = 16,
-};
-
-static const struct macb_config sama5d3_config = {
-       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
-       .dma_burst_length = 16,
-};
-
-static const struct macb_config sama5d4_config = {
-       .caps = 0,
-       .dma_burst_length = 4,
-};
-
-static const struct of_device_id macb_dt_ids[] = {
-       { .compatible = "cdns,at32ap7000-macb" },
-       { .compatible = "cdns,at91sam9260-macb" },
-       { .compatible = "cdns,macb" },
-       { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
-       { .compatible = "cdns,gem", .data = &pc302gem_config },
-       { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
-       { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, macb_dt_ids);
-#endif
-
 /*
- * Configure peripheral capacities according to device tree
+ * Configure peripheral capabilities according to device tree
  * and integration options used
  */
-static void macb_configure_caps(struct macb *bp)
+static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
 {
        u32 dcfg;
-       const struct of_device_id *match;
-       const struct macb_config *config;
 
-       if (bp->pdev->dev.of_node) {
-               match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
-               if (match && match->data) {
-                       config = match->data;
+       if (dt_conf)
+               bp->caps = dt_conf->caps;
 
-                       bp->caps = config->caps;
-                       /*
-                        * As we have access to the matching node, configure
-                        * DMA burst length as well
-                        */
-                       bp->dma_burst_length = config->dma_burst_length;
-               }
-       }
-
-       if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
+       if (macb_is_gem_hw(bp->regs)) {
                bp->caps |= MACB_CAPS_MACB_IS_GEM;
 
-       if (macb_is_gem(bp)) {
                dcfg = gem_readl(bp, DCFG1);
                if (GEM_BFEXT(IRQCOR, dcfg) == 0)
                        bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
@@ -2185,18 +2158,22 @@ static void macb_probe_queues(void __iomem *mem,
                              unsigned int *num_queues)
 {
        unsigned int hw_q;
-       u32 mid;
 
        *queue_mask = 0x1;
        *num_queues = 1;
 
-       /* is it macb or gem ? */
-       mid = __raw_readl(mem + MACB_MID);
-       if (MACB_BFEXT(IDNUM, mid) != 0x2)
+       /* is it macb or gem ?
+        *
+        * We need to read directly from the hardware here because
+        * we are early in the probe process and don't have the
+        * MACB_CAPS_MACB_IS_GEM flag set yet
+        */
+       if (!macb_is_gem_hw(mem))
                return;
 
        /* bit 0 is never set but queue 0 always exists */
-       *queue_mask = __raw_readl(mem + GEM_DCFG6) & 0xff;
+       *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
+
        *queue_mask |= 0x1;
 
        for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
@@ -2204,95 +2181,73 @@ static void macb_probe_queues(void __iomem *mem,
                        (*num_queues)++;
 }
 
-static int macb_probe(struct platform_device *pdev)
+static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
+                        struct clk **hclk, struct clk **tx_clk)
 {
-       struct macb_platform_data *pdata;
-       struct resource *regs;
-       struct net_device *dev;
-       struct macb *bp;
-       struct macb_queue *queue;
-       struct phy_device *phydev;
-       u32 config;
-       int err = -ENXIO;
-       const char *mac;
-       void __iomem *mem;
-       unsigned int hw_q, queue_mask, q, num_queues;
-       struct clk *pclk, *hclk, *tx_clk;
-
-       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs) {
-               dev_err(&pdev->dev, "no mmio resource defined\n");
-               goto err_out;
-       }
+       int err;
 
-       pclk = devm_clk_get(&pdev->dev, "pclk");
-       if (IS_ERR(pclk)) {
-               err = PTR_ERR(pclk);
+       *pclk = devm_clk_get(&pdev->dev, "pclk");
+       if (IS_ERR(*pclk)) {
+               err = PTR_ERR(*pclk);
                dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
-               goto err_out;
+               return err;
        }
 
-       hclk = devm_clk_get(&pdev->dev, "hclk");
-       if (IS_ERR(hclk)) {
-               err = PTR_ERR(hclk);
+       *hclk = devm_clk_get(&pdev->dev, "hclk");
+       if (IS_ERR(*hclk)) {
+               err = PTR_ERR(*hclk);
                dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
-               goto err_out;
+               return err;
        }
 
-       tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+       *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+       if (IS_ERR(*tx_clk))
+               *tx_clk = NULL;
 
-       err = clk_prepare_enable(pclk);
+       err = clk_prepare_enable(*pclk);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
-               goto err_out;
+               return err;
        }
 
-       err = clk_prepare_enable(hclk);
+       err = clk_prepare_enable(*hclk);
        if (err) {
                dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
-               goto err_out_disable_pclk;
+               goto err_disable_pclk;
        }
 
-       if (!IS_ERR(tx_clk)) {
-               err = clk_prepare_enable(tx_clk);
-               if (err) {
-                       dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
-                               err);
-                       goto err_out_disable_hclk;
-               }
+       err = clk_prepare_enable(*tx_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+               goto err_disable_hclk;
        }
 
-       err = -ENOMEM;
-       mem = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
-       if (!mem) {
-               dev_err(&pdev->dev, "failed to map registers, aborting.\n");
-               goto err_out_disable_clocks;
-       }
+       return 0;
 
-       macb_probe_queues(mem, &queue_mask, &num_queues);
-       dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
-       if (!dev)
-               goto err_out_disable_clocks;
+err_disable_hclk:
+       clk_disable_unprepare(*hclk);
 
-       SET_NETDEV_DEV(dev, &pdev->dev);
+err_disable_pclk:
+       clk_disable_unprepare(*pclk);
 
-       bp = netdev_priv(dev);
-       bp->pdev = pdev;
-       bp->dev = dev;
-       bp->regs = mem;
-       bp->num_queues = num_queues;
-       bp->pclk = pclk;
-       bp->hclk = hclk;
-       bp->tx_clk = tx_clk;
+       return err;
+}
 
-       spin_lock_init(&bp->lock);
+static int macb_init(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       unsigned int hw_q, q;
+       struct macb *bp = netdev_priv(dev);
+       struct macb_queue *queue;
+       int err;
+       u32 val;
 
        /* set the queue register mapping once for all: queue0 has a special
         * register mapping but we don't want to test the queue index then
         * compute the corresponding register offset at run time.
         */
        for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
-               if (!(queue_mask & (1 << hw_q)))
+               if (!(bp->queue_mask & (1 << hw_q)))
                        continue;
 
                queue = &bp->queues[q];
@@ -2319,27 +2274,21 @@ static int macb_probe(struct platform_device *pdev)
                 */
                queue->irq = platform_get_irq(pdev, q);
                err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
-                                      0, dev->name, queue);
+                                      IRQF_SHARED, dev->name, queue);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to request IRQ %d (error %d)\n",
                                queue->irq, err);
-                       goto err_out_free_netdev;
+                       return err;
                }
 
                INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
                q++;
        }
-       dev->irq = bp->queues[0].irq;
 
        dev->netdev_ops = &macb_netdev_ops;
        netif_napi_add(dev, &bp->napi, macb_poll, 64);
 
-       dev->base_addr = regs->start;
-
-       /* setup capacities */
-       macb_configure_caps(bp);
-
        /* setup appropriated routines according to adapter type */
        if (macb_is_gem(bp)) {
                bp->max_tx_length = GEM_MAX_TX_LEN;
@@ -2366,18 +2315,470 @@ static int macb_probe(struct platform_device *pdev)
                dev->hw_features &= ~NETIF_F_SG;
        dev->features = dev->hw_features;
 
+       val = 0;
+       if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+               val = GEM_BIT(RGMII);
+       else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
+                (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
+               val = MACB_BIT(RMII);
+       else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
+               val = MACB_BIT(MII);
+
+       if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
+               val |= MACB_BIT(CLKEN);
+
+       macb_or_gem_writel(bp, USRIO, val);
+
        /* Set MII management clock divider */
-       config = macb_mdc_clk_div(bp);
-       config |= macb_dbw(bp);
-       macb_writel(bp, NCFGR, config);
+       val = macb_mdc_clk_div(bp);
+       val |= macb_dbw(bp);
+       macb_writel(bp, NCFGR, val);
+
+       return 0;
+}
+
+#if defined(CONFIG_OF)
+/* 1518 rounded up */
+#define AT91ETHER_MAX_RBUFF_SZ 0x600
+/* max number of receive buffers */
+#define AT91ETHER_MAX_RX_DESCR 9
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct net_device *dev)
+{
+       struct macb *lp = netdev_priv(dev);
+       dma_addr_t addr;
+       u32 ctl;
+       int i;
+
+       lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+                                        (AT91ETHER_MAX_RX_DESCR *
+                                         sizeof(struct macb_dma_desc)),
+                                        &lp->rx_ring_dma, GFP_KERNEL);
+       if (!lp->rx_ring)
+               return -ENOMEM;
+
+       lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+                                           AT91ETHER_MAX_RX_DESCR *
+                                           AT91ETHER_MAX_RBUFF_SZ,
+                                           &lp->rx_buffers_dma, GFP_KERNEL);
+       if (!lp->rx_buffers) {
+               dma_free_coherent(&lp->pdev->dev,
+                                 AT91ETHER_MAX_RX_DESCR *
+                                 sizeof(struct macb_dma_desc),
+                                 lp->rx_ring, lp->rx_ring_dma);
+               lp->rx_ring = NULL;
+               return -ENOMEM;
+       }
+
+       addr = lp->rx_buffers_dma;
+       for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
+               lp->rx_ring[i].addr = addr;
+               lp->rx_ring[i].ctrl = 0;
+               addr += AT91ETHER_MAX_RBUFF_SZ;
+       }
+
+       /* Set the Wrap bit on the last descriptor */
+       lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+
+       /* Reset buffer index */
+       lp->rx_tail = 0;
+
+       /* Program address of descriptor list in Rx Buffer Queue register */
+       macb_writel(lp, RBQP, lp->rx_ring_dma);
+
+       /* Enable Receive and Transmit */
+       ctl = macb_readl(lp, NCR);
+       macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+
+       return 0;
+}
+
+/* Open the ethernet interface */
+static int at91ether_open(struct net_device *dev)
+{
+       struct macb *lp = netdev_priv(dev);
+       u32 ctl;
+       int ret;
+
+       /* Clear internal statistics */
+       ctl = macb_readl(lp, NCR);
+       macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
+
+       macb_set_hwaddr(lp);
+
+       ret = at91ether_start(dev);
+       if (ret)
+               return ret;
+
+       /* Enable MAC interrupts */
+       macb_writel(lp, IER, MACB_BIT(RCOMP)    |
+                            MACB_BIT(RXUBR)    |
+                            MACB_BIT(ISR_TUND) |
+                            MACB_BIT(ISR_RLE)  |
+                            MACB_BIT(TCOMP)    |
+                            MACB_BIT(ISR_ROVR) |
+                            MACB_BIT(HRESP));
+
+       /* schedule a link state check */
+       phy_start(lp->phy_dev);
+
+       netif_start_queue(dev);
+
+       return 0;
+}
+
+/* Close the interface */
+static int at91ether_close(struct net_device *dev)
+{
+       struct macb *lp = netdev_priv(dev);
+       u32 ctl;
+
+       /* Disable Receiver and Transmitter */
+       ctl = macb_readl(lp, NCR);
+       macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+       /* Disable MAC interrupts */
+       macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
+                            MACB_BIT(RXUBR)    |
+                            MACB_BIT(ISR_TUND) |
+                            MACB_BIT(ISR_RLE)  |
+                            MACB_BIT(TCOMP)    |
+                            MACB_BIT(ISR_ROVR) |
+                            MACB_BIT(HRESP));
+
+       netif_stop_queue(dev);
+
+       dma_free_coherent(&lp->pdev->dev,
+                         AT91ETHER_MAX_RX_DESCR *
+                         sizeof(struct macb_dma_desc),
+                         lp->rx_ring, lp->rx_ring_dma);
+       lp->rx_ring = NULL;
+
+       dma_free_coherent(&lp->pdev->dev,
+                         AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
+                         lp->rx_buffers, lp->rx_buffers_dma);
+       lp->rx_buffers = NULL;
+
+       return 0;
+}
+
+/* Transmit packet */
+static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct macb *lp = netdev_priv(dev);
+
+       if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
+               netif_stop_queue(dev);
+
+               /* Store packet information (to free when Tx completed) */
+               lp->skb = skb;
+               lp->skb_length = skb->len;
+               lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
+                                                       DMA_TO_DEVICE);
+
+               /* Set address of the data in the Transmit Address register */
+               macb_writel(lp, TAR, lp->skb_physaddr);
+               /* Set length of the packet in the Transmit Control register */
+               macb_writel(lp, TCR, skb->len);
+
+       } else {
+               netdev_err(dev, "%s called, but device is busy!\n", __func__);
+               return NETDEV_TX_BUSY;
+       }
+
+       return NETDEV_TX_OK;
+}
 
-       mac = of_get_mac_address(pdev->dev.of_node);
+/* Extract received frames from buffer descriptors and send them to the
+ * upper layers. (Called from interrupt context)
+ */
+static void at91ether_rx(struct net_device *dev)
+{
+       struct macb *lp = netdev_priv(dev);
+       unsigned char *p_recv;
+       struct sk_buff *skb;
+       unsigned int pktlen;
+
+       while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+               p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
+               pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+               skb = netdev_alloc_skb(dev, pktlen + 2);
+               if (skb) {
+                       skb_reserve(skb, 2);
+                       memcpy(skb_put(skb, pktlen), p_recv, pktlen);
+
+                       skb->protocol = eth_type_trans(skb, dev);
+                       lp->stats.rx_packets++;
+                       lp->stats.rx_bytes += pktlen;
+                       netif_rx(skb);
+               } else {
+                       lp->stats.rx_dropped++;
+               }
+
+               if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+                       lp->stats.multicast++;
+
+               /* reset ownership bit */
+               lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+
+               /* wrap after last buffer */
+               if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
+                       lp->rx_tail = 0;
+               else
+                       lp->rx_tail++;
+       }
+}
+
+/* MAC interrupt handler */
+static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct macb *lp = netdev_priv(dev);
+       u32 intstatus, ctl;
+
+       /* MAC Interrupt Status register indicates what interrupts are pending.
+        * It is automatically cleared once read.
+        */
+       intstatus = macb_readl(lp, ISR);
+
+       /* Receive complete */
+       if (intstatus & MACB_BIT(RCOMP))
+               at91ether_rx(dev);
+
+       /* Transmit complete */
+       if (intstatus & MACB_BIT(TCOMP)) {
+               /* The TCOM bit is set even if the transmission failed */
+               if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
+                       lp->stats.tx_errors++;
+
+               if (lp->skb) {
+                       dev_kfree_skb_irq(lp->skb);
+                       lp->skb = NULL;
+                       dma_unmap_single(NULL, lp->skb_physaddr,
+                                        lp->skb_length, DMA_TO_DEVICE);
+                       lp->stats.tx_packets++;
+                       lp->stats.tx_bytes += lp->skb_length;
+               }
+               netif_wake_queue(dev);
+       }
+
+       /* Work-around for EMAC Errata section 41.3.1 */
+       if (intstatus & MACB_BIT(RXUBR)) {
+               ctl = macb_readl(lp, NCR);
+               macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
+               macb_writel(lp, NCR, ctl | MACB_BIT(RE));
+       }
+
+       if (intstatus & MACB_BIT(ISR_ROVR))
+               netdev_err(dev, "ROVR error\n");
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void at91ether_poll_controller(struct net_device *dev)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       at91ether_interrupt(dev->irq, dev);
+       local_irq_restore(flags);
+}
+#endif
+
+static const struct net_device_ops at91ether_netdev_ops = {
+       .ndo_open               = at91ether_open,
+       .ndo_stop               = at91ether_close,
+       .ndo_start_xmit         = at91ether_start_xmit,
+       .ndo_get_stats          = macb_get_stats,
+       .ndo_set_rx_mode        = macb_set_rx_mode,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_do_ioctl           = macb_ioctl,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_change_mtu         = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = at91ether_poll_controller,
+#endif
+};
+
+static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
+                             struct clk **hclk, struct clk **tx_clk)
+{
+       int err;
+
+       *hclk = NULL;
+       *tx_clk = NULL;
+
+       *pclk = devm_clk_get(&pdev->dev, "ether_clk");
+       if (IS_ERR(*pclk))
+               return PTR_ERR(*pclk);
+
+       err = clk_prepare_enable(*pclk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+static int at91ether_init(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct macb *bp = netdev_priv(dev);
+       int err;
+       u32 reg;
+
+       dev->netdev_ops = &at91ether_netdev_ops;
+       dev->ethtool_ops = &macb_ethtool_ops;
+
+       err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
+                              0, dev->name, dev);
+       if (err)
+               return err;
+
+       macb_writel(bp, NCR, 0);
+
+       reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
+       if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+               reg |= MACB_BIT(RM9200_RMII);
+
+       macb_writel(bp, NCFGR, reg);
+
+       return 0;
+}
+
+static const struct macb_config at91sam9260_config = {
+       .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+};
+
+static const struct macb_config pc302gem_config = {
+       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+       .dma_burst_length = 16,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+};
+
+static const struct macb_config sama5d3_config = {
+       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+       .dma_burst_length = 16,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+};
+
+static const struct macb_config sama5d4_config = {
+       .caps = 0,
+       .dma_burst_length = 4,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+};
+
+static const struct macb_config emac_config = {
+       .clk_init = at91ether_clk_init,
+       .init = at91ether_init,
+};
+
+static const struct of_device_id macb_dt_ids[] = {
+       { .compatible = "cdns,at32ap7000-macb" },
+       { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
+       { .compatible = "cdns,macb" },
+       { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
+       { .compatible = "cdns,gem", .data = &pc302gem_config },
+       { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+       { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
+       { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
+       { .compatible = "cdns,emac", .data = &emac_config },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, macb_dt_ids);
+#endif /* CONFIG_OF */
+
+static int macb_probe(struct platform_device *pdev)
+{
+       int (*clk_init)(struct platform_device *, struct clk **,
+                       struct clk **, struct clk **)
+                                             = macb_clk_init;
+       int (*init)(struct platform_device *) = macb_init;
+       struct device_node *np = pdev->dev.of_node;
+       const struct macb_config *macb_config = NULL;
+       struct clk *pclk, *hclk, *tx_clk;
+       unsigned int queue_mask, num_queues;
+       struct macb_platform_data *pdata;
+       struct phy_device *phydev;
+       struct net_device *dev;
+       struct resource *regs;
+       void __iomem *mem;
+       const char *mac;
+       struct macb *bp;
+       int err;
+
+       if (np) {
+               const struct of_device_id *match;
+
+               match = of_match_node(macb_dt_ids, np);
+               if (match && match->data) {
+                       macb_config = match->data;
+                       clk_init = macb_config->clk_init;
+                       init = macb_config->init;
+               }
+       }
+
+       err = clk_init(pdev, &pclk, &hclk, &tx_clk);
+       if (err)
+               return err;
+
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mem = devm_ioremap_resource(&pdev->dev, regs);
+       if (IS_ERR(mem)) {
+               err = PTR_ERR(mem);
+               goto err_disable_clocks;
+       }
+
+       macb_probe_queues(mem, &queue_mask, &num_queues);
+       dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
+       if (!dev) {
+               err = -ENOMEM;
+               goto err_disable_clocks;
+       }
+
+       dev->base_addr = regs->start;
+
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       bp = netdev_priv(dev);
+       bp->pdev = pdev;
+       bp->dev = dev;
+       bp->regs = mem;
+       bp->num_queues = num_queues;
+       bp->queue_mask = queue_mask;
+       if (macb_config)
+               bp->dma_burst_length = macb_config->dma_burst_length;
+       bp->pclk = pclk;
+       bp->hclk = hclk;
+       bp->tx_clk = tx_clk;
+       spin_lock_init(&bp->lock);
+
+       /* setup capabilities */
+       macb_configure_caps(bp, macb_config);
+
+       platform_set_drvdata(pdev, dev);
+
+       dev->irq = platform_get_irq(pdev, 0);
+       if (dev->irq < 0) {
+               err = dev->irq;
+               goto err_disable_clocks;
+       }
+
+       mac = of_get_mac_address(np);
        if (mac)
                memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
        else
                macb_get_hwaddr(bp);
 
-       err = of_get_phy_mode(pdev->dev.of_node);
+       err = of_get_phy_mode(np);
        if (err < 0) {
                pdata = dev_get_platdata(&pdev->dev);
                if (pdata && pdata->is_rmii)
@@ -2388,34 +2789,21 @@ static int macb_probe(struct platform_device *pdev)
                bp->phy_interface = err;
        }
 
-       if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
-               macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
-       else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
-#if defined(CONFIG_ARCH_AT91)
-               macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
-                                              MACB_BIT(CLKEN)));
-#else
-               macb_or_gem_writel(bp, USRIO, 0);
-#endif
-       else
-#if defined(CONFIG_ARCH_AT91)
-               macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
-#else
-               macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
-#endif
+       /* IP specific init */
+       err = init(pdev);
+       if (err)
+               goto err_out_free_netdev;
 
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-               goto err_out_free_netdev;
+               goto err_out_unregister_netdev;
        }
 
        err = macb_mii_init(bp);
        if (err)
                goto err_out_unregister_netdev;
 
-       platform_set_drvdata(pdev, dev);
-
        netif_carrier_off(dev);
 
        netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
@@ -2430,16 +2818,15 @@ static int macb_probe(struct platform_device *pdev)
 
 err_out_unregister_netdev:
        unregister_netdev(dev);
+
 err_out_free_netdev:
        free_netdev(dev);
-err_out_disable_clocks:
-       if (!IS_ERR(tx_clk))
-               clk_disable_unprepare(tx_clk);
-err_out_disable_hclk:
+
+err_disable_clocks:
+       clk_disable_unprepare(tx_clk);
        clk_disable_unprepare(hclk);
-err_out_disable_pclk:
        clk_disable_unprepare(pclk);
-err_out:
+
        return err;
 }
 
@@ -2458,8 +2845,7 @@ static int macb_remove(struct platform_device *pdev)
                kfree(bp->mii_bus->irq);
                mdiobus_free(bp->mii_bus);
                unregister_netdev(dev);
-               if (!IS_ERR(bp->tx_clk))
-                       clk_disable_unprepare(bp->tx_clk);
+               clk_disable_unprepare(bp->tx_clk);
                clk_disable_unprepare(bp->hclk);
                clk_disable_unprepare(bp->pclk);
                free_netdev(dev);
@@ -2477,8 +2863,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
        netif_carrier_off(netdev);
        netif_device_detach(netdev);
 
-       if (!IS_ERR(bp->tx_clk))
-               clk_disable_unprepare(bp->tx_clk);
+       clk_disable_unprepare(bp->tx_clk);
        clk_disable_unprepare(bp->hclk);
        clk_disable_unprepare(bp->pclk);
 
@@ -2493,8 +2878,7 @@ static int __maybe_unused macb_resume(struct device *dev)
 
        clk_prepare_enable(bp->pclk);
        clk_prepare_enable(bp->hclk);
-       if (!IS_ERR(bp->tx_clk))
-               clk_prepare_enable(bp->tx_clk);
+       clk_prepare_enable(bp->tx_clk);
 
        netif_device_attach(netdev);
 
index ff85619a97325fc0f1fa64a8896cbb8cc417d3cc..eb7d76f7bf6aaf983e97408ced9b359b54c8ddc3 100644 (file)
@@ -11,7 +11,7 @@
 #define _MACB_H
 
 #define MACB_GREGS_NBR 16
-#define MACB_GREGS_VERSION 1
+#define MACB_GREGS_VERSION 2
 #define MACB_MAX_QUEUES 8
 
 /* MACB register offsets */
 /* Bitfields in DMACFG. */
 #define GEM_FBLDO_OFFSET       0 /* fixed burst length for DMA */
 #define GEM_FBLDO_SIZE         5
-#define GEM_ENDIA_OFFSET       7 /* endian swap mode for packet data access */
-#define GEM_ENDIA_SIZE         1
+#define GEM_ENDIA_DESC_OFFSET  6 /* endian swap mode for management descriptor access */
+#define GEM_ENDIA_DESC_SIZE    1
+#define GEM_ENDIA_PKT_OFFSET   7 /* endian swap mode for packet data access */
+#define GEM_ENDIA_PKT_SIZE     1
 #define GEM_RXBMS_OFFSET       8 /* RX packet buffer memory size select */
 #define GEM_RXBMS_SIZE         2
 #define GEM_TXPBMS_OFFSET      10 /* TX packet buffer memory size select */
 
 /* Capability mask bits */
 #define MACB_CAPS_ISR_CLEAR_ON_WRITE           0x00000001
+#define MACB_CAPS_USRIO_HAS_CLKEN              0x00000002
+#define MACB_CAPS_USRIO_DEFAULT_IS_MII         0x00000004
 #define MACB_CAPS_FIFO_MODE                    0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
 
 /* Register access macros */
 #define macb_readl(port,reg)                           \
-       __raw_readl((port)->regs + MACB_##reg)
+       readl_relaxed((port)->regs + MACB_##reg)
 #define macb_writel(port,reg,value)                    \
-       __raw_writel((value), (port)->regs + MACB_##reg)
+       writel_relaxed((value), (port)->regs + MACB_##reg)
 #define gem_readl(port, reg)                           \
-       __raw_readl((port)->regs + GEM_##reg)
+       readl_relaxed((port)->regs + GEM_##reg)
 #define gem_writel(port, reg, value)                   \
-       __raw_writel((value), (port)->regs + GEM_##reg)
+       writel_relaxed((value), (port)->regs + GEM_##reg)
 #define queue_readl(queue, reg)                                \
-       __raw_readl((queue)->bp->regs + (queue)->reg)
+       readl_relaxed((queue)->bp->regs + (queue)->reg)
 #define queue_writel(queue, reg, value)                        \
-       __raw_writel((value), (queue)->bp->regs + (queue)->reg)
+       writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
 
 /* Conditional GEM/MACB macros.  These perform the operation to the correct
  * register dependent on whether the device is a GEM or a MACB.  For registers
@@ -750,6 +754,9 @@ struct macb_or_gem_ops {
 struct macb_config {
        u32                     caps;
        unsigned int            dma_burst_length;
+       int     (*clk_init)(struct platform_device *pdev, struct clk **pclk,
+                           struct clk **hclk, struct clk **tx_clk);
+       int     (*init)(struct platform_device *pdev);
 };
 
 struct macb_queue {
@@ -780,6 +787,7 @@ struct macb {
        size_t                  rx_buffer_size;
 
        unsigned int            num_queues;
+       unsigned int            queue_mask;
        struct macb_queue       queues[MACB_MAX_QUEUES];
 
        spinlock_t              lock;
@@ -820,18 +828,14 @@ struct macb {
        u64                     ethtool_stats[GEM_STATS_LEN];
 };
 
-extern const struct ethtool_ops macb_ethtool_ops;
-
-int macb_mii_init(struct macb *bp);
-int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-struct net_device_stats *macb_get_stats(struct net_device *dev);
-void macb_set_rx_mode(struct net_device *dev);
-void macb_set_hwaddr(struct macb *bp);
-void macb_get_hwaddr(struct macb *bp);
-
 static inline bool macb_is_gem(struct macb *bp)
 {
        return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
 }
 
+static inline bool macb_is_gem_hw(void __iomem *addr)
+{
+       return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
+}
+
 #endif /* _MACB_H */
index 47bfea24b9e1b1bce64f07d5eb0424fc2a9b7e0f..63efa0dc45ba61b11f8e1a245513485412e50027 100644 (file)
@@ -47,9 +47,9 @@
 #define XGMAC_REMOTE_WAKE      0x00000700      /* Remote Wake-Up Frm Filter */
 #define XGMAC_PMT              0x00000704      /* PMT Control and Status */
 #define XGMAC_MMC_CTRL         0x00000800      /* XGMAC MMC Control */
-#define XGMAC_MMC_INTR_RX      0x00000804      /* Recieve Interrupt */
+#define XGMAC_MMC_INTR_RX      0x00000804      /* Receive Interrupt */
 #define XGMAC_MMC_INTR_TX      0x00000808      /* Transmit Interrupt */
-#define XGMAC_MMC_INTR_MASK_RX 0x0000080c      /* Recieve Interrupt Mask */
+#define XGMAC_MMC_INTR_MASK_RX 0x0000080c      /* Receive Interrupt Mask */
 #define XGMAC_MMC_INTR_MASK_TX 0x00000810      /* Transmit Interrupt Mask */
 
 /* Hardware TX Statistics Counters */
 #define XGMAC_FLOW_CTRL_PT_MASK        0xffff0000      /* Pause Time Mask */
 #define XGMAC_FLOW_CTRL_PT_SHIFT       16
 #define XGMAC_FLOW_CTRL_DZQP   0x00000080      /* Disable Zero-Quanta Phase */
-#define XGMAC_FLOW_CTRL_PLT    0x00000020      /* Pause Low Threshhold */
+#define XGMAC_FLOW_CTRL_PLT    0x00000020      /* Pause Low Threshold */
 #define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030    /* PLT MASK */
 #define XGMAC_FLOW_CTRL_UP     0x00000008      /* Unicast Pause Frame Detect */
 #define XGMAC_FLOW_CTRL_RFE    0x00000004      /* Rx Flow Control Enable */
 /* XGMAC Operation Mode Register */
 #define XGMAC_OMR_TSF          0x00200000      /* TX FIFO Store and Forward */
 #define XGMAC_OMR_FTF          0x00100000      /* Flush Transmit FIFO */
-#define XGMAC_OMR_TTC          0x00020000      /* Transmit Threshhold Ctrl */
+#define XGMAC_OMR_TTC          0x00020000      /* Transmit Threshold Ctrl */
 #define XGMAC_OMR_TTC_MASK     0x00030000
-#define XGMAC_OMR_RFD          0x00006000      /* FC Deactivation Threshhold */
-#define XGMAC_OMR_RFD_MASK     0x00007000      /* FC Deact Threshhold MASK */
-#define XGMAC_OMR_RFA          0x00000600      /* FC Activation Threshhold */
-#define XGMAC_OMR_RFA_MASK     0x00000E00      /* FC Act Threshhold MASK */
+#define XGMAC_OMR_RFD          0x00006000      /* FC Deactivation Threshold */
+#define XGMAC_OMR_RFD_MASK     0x00007000      /* FC Deact Threshold MASK */
+#define XGMAC_OMR_RFA          0x00000600      /* FC Activation Threshold */
+#define XGMAC_OMR_RFA_MASK     0x00000E00      /* FC Act Threshold MASK */
 #define XGMAC_OMR_EFC          0x00000100      /* Enable Hardware FC */
 #define XGMAC_OMR_FEF          0x00000080      /* Forward Error Frames */
 #define XGMAC_OMR_DT           0x00000040      /* Drop TCP/IP csum Errors */
 #define XGMAC_OMR_RSF          0x00000020      /* RX FIFO Store and Forward */
-#define XGMAC_OMR_RTC_256      0x00000018      /* RX Threshhold Ctrl */
-#define XGMAC_OMR_RTC_MASK     0x00000018      /* RX Threshhold Ctrl MASK */
+#define XGMAC_OMR_RTC_256      0x00000018      /* RX Threshold Ctrl */
+#define XGMAC_OMR_RTC_MASK     0x00000018      /* RX Threshold Ctrl MASK */
 
 /* XGMAC HW Features Register */
 #define DMA_HW_FEAT_TXCOESEL   0x00010000      /* TX Checksum offload */
index ac6473f75eb9c212682b0ceea755f601f62772e2..7daa088a9bb7adeec8d2d0afa9431b72d0a09575 100644 (file)
@@ -97,6 +97,17 @@ config CHELSIO_T4_DCB
 
          If unsure, say N.
 
+config CHELSIO_T4_FCOE
+       bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards"
+       default n
+       depends on CHELSIO_T4 && CHELSIO_T4_DCB && FCOE
+       ---help---
+         Enable FCoE offload features.
+         Say Y here if you want to enable Fibre Channel over Ethernet (FCoE) support
+         in the driver.
+
+         If unsure, say N.
+
 config CHELSIO_T4VF
        tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
        depends on PCI
index 186566bfdbc8c579ada4136f0d71a9242e9b7629..f5f1b0b51ebd225c4d82967391e5dedda2a7848f 100644 (file)
@@ -354,7 +354,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
        adapter->msg_enable = val;
 }
 
-static char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK",
        "TxOctetsBad",
        "TxUnicastFramesOK",
index db76f70404551c84b924f560957ca740975eebf2..b96e4bfcac41a8086d5fbb45bf508bd5175072ee 100644 (file)
@@ -1537,7 +1537,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
        adapter->msg_enable = val;
 }
 
-static char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
index 184a8d545ac4230e07788fbb831be2dcdfa57f83..a22768c94200efe915016c85d18da2eaf806b5ff 100644 (file)
@@ -840,7 +840,7 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
  *     Read the specified number of 32-bit words from the serial flash.
  *     If @byte_oriented is set the read data is stored as a byte array
  *     (i.e., big-endian), otherwise as 32-bit words in the platform's
- *     natural endianess.
+ *     natural endianness.
  */
 static int t3_read_flash(struct adapter *adapter, unsigned int addr,
                         unsigned int nwords, u32 *data, int byte_oriented)
index ae50cd72358cb4bda0c78784f6d0cb2586bac7c2..07d9b68a4da20146f14cbc1aa418fed1dcbdabb7 100644 (file)
@@ -6,4 +6,5 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
 
 cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o
 cxgb4-$(CONFIG_CHELSIO_T4_DCB) +=  cxgb4_dcb.o
+cxgb4-$(CONFIG_CHELSIO_T4_FCOE) +=  cxgb4_fcoe.o
 cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
index c6ff4890d171a1509c7b3ca2aa21e5b0edd6039f..6c80eb2e61f4b5e271f968787e5a2958c0e20e53 100644 (file)
@@ -369,7 +369,7 @@ enum {
        MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
        MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
        MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
-       MAX_RDMA_CIQS = NCHAN,        /* # of  RDMA concentrator IQs */
+       MAX_RDMA_CIQS = 32,        /* # of  RDMA concentrator IQs */
        MAX_ISCSI_QUEUES = NCHAN,     /* # of streaming iSCSI Rx queues */
 };
 
@@ -385,6 +385,10 @@ struct sge_rspq;
 
 #include "cxgb4_dcb.h"
 
+#ifdef CONFIG_CHELSIO_T4_FCOE
+#include "cxgb4_fcoe.h"
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+
 struct port_info {
        struct adapter *adapter;
        u16    viid;
@@ -404,6 +408,9 @@ struct port_info {
 #ifdef CONFIG_CHELSIO_T4_DCB
        struct port_dcb_info dcb;     /* Data Center Bridging support */
 #endif
+#ifdef CONFIG_CHELSIO_T4_FCOE
+       struct cxgb_fcoe fcoe;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
 };
 
 struct dentry;
@@ -597,8 +604,8 @@ struct sge {
        u16 rdmaqs;                 /* # of available RDMA Rx queues */
        u16 rdmaciqs;               /* # of available RDMA concentrator IQs */
        u16 ofld_rxq[MAX_OFLD_QSETS];
-       u16 rdma_rxq[NCHAN];
-       u16 rdma_ciq[NCHAN];
+       u16 rdma_rxq[MAX_RDMA_QUEUES];
+       u16 rdma_ciq[MAX_RDMA_CIQS];
        u16 timer_val[SGE_NTIMERS];
        u8 counter_val[SGE_NCOUNTERS];
        u32 fl_pg_order;            /* large page allocation size */
index dcb0479452907abd43732aa9332f927f7f0c3ef3..f0285bcbe5981e1202071d536080d774566cb20e 100644 (file)
@@ -1775,6 +1775,8 @@ do { \
                int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
 
                S("QType:", "RDMA-CPL");
+               S("Interface:",
+                 rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
                R("RspQ ID:", rspq.abs_id);
                R("RspQ size:", rspq.size);
                R("RspQE size:", rspq.iqe_len);
@@ -1794,6 +1796,8 @@ do { \
                int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
 
                S("QType:", "RDMA-CIQ");
+               S("Interface:",
+                 rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
                R("RspQ ID:", rspq.abs_id);
                R("RspQ size:", rspq.size);
                R("RspQE size:", rspq.iqe_len);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
new file mode 100644 (file)
index 0000000..6c8a62e
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2015 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_CHELSIO_T4_FCOE
+
+#include <scsi/fc/fc_fs.h>
+#include <scsi/libfcoe.h>
+#include "cxgb4.h"
+
+bool cxgb_fcoe_sof_eof_supported(struct adapter *adap, struct sk_buff *skb)
+{
+       struct fcoe_hdr *fcoeh = (struct fcoe_hdr *)skb_network_header(skb);
+       u8 sof = fcoeh->fcoe_sof;
+       u8 eof = 0;
+
+       if ((sof != FC_SOF_I3) && (sof != FC_SOF_N3)) {
+               dev_err(adap->pdev_dev, "Unsupported SOF 0x%x\n", sof);
+               return false;
+       }
+
+       skb_copy_bits(skb, skb->len - 4, &eof, 1);
+
+       if ((eof != FC_EOF_N) && (eof != FC_EOF_T)) {
+               dev_err(adap->pdev_dev, "Unsupported EOF 0x%x\n", eof);
+               return false;
+       }
+
+       return true;
+}
+
+/**
+ * cxgb_fcoe_enable - enable FCoE offload features
+ * @netdev: net device
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int cxgb_fcoe_enable(struct net_device *netdev)
+{
+       struct port_info *pi = netdev_priv(netdev);
+       struct adapter *adap = pi->adapter;
+       struct cxgb_fcoe *fcoe = &pi->fcoe;
+
+       if (is_t4(adap->params.chip))
+               return -EINVAL;
+
+       if (!(adap->flags & FULL_INIT_DONE))
+               return -EINVAL;
+
+       dev_info(adap->pdev_dev, "Enabling FCoE offload features\n");
+
+       netdev->features |= NETIF_F_FCOE_CRC;
+       netdev->vlan_features |= NETIF_F_FCOE_CRC;
+       netdev->features |= NETIF_F_FCOE_MTU;
+       netdev->vlan_features |= NETIF_F_FCOE_MTU;
+
+       netdev_features_change(netdev);
+
+       fcoe->flags |= CXGB_FCOE_ENABLED;
+
+       return 0;
+}
+
+/**
+ * cxgb_fcoe_disable - disable FCoE offload
+ * @netdev: net device
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int cxgb_fcoe_disable(struct net_device *netdev)
+{
+       struct port_info *pi = netdev_priv(netdev);
+       struct adapter *adap = pi->adapter;
+       struct cxgb_fcoe *fcoe = &pi->fcoe;
+
+       if (!(fcoe->flags & CXGB_FCOE_ENABLED))
+               return -EINVAL;
+
+       dev_info(adap->pdev_dev, "Disabling FCoE offload features\n");
+
+       fcoe->flags &= ~CXGB_FCOE_ENABLED;
+
+       netdev->features &= ~NETIF_F_FCOE_CRC;
+       netdev->vlan_features &= ~NETIF_F_FCOE_CRC;
+       netdev->features &= ~NETIF_F_FCOE_MTU;
+       netdev->vlan_features &= ~NETIF_F_FCOE_MTU;
+
+       netdev_features_change(netdev);
+
+       return 0;
+}
+#endif /* CONFIG_CHELSIO_T4_FCOE */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h
new file mode 100644 (file)
index 0000000..bf9258a
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2015 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_FCOE_H__
+#define __CXGB4_FCOE_H__
+
+#ifdef CONFIG_CHELSIO_T4_FCOE
+
+#define CXGB_FCOE_TXPKT_CSUM_START     28
+#define CXGB_FCOE_TXPKT_CSUM_END       8
+
+/* fcoe flags */
+enum {
+       CXGB_FCOE_ENABLED     = (1 << 0),
+};
+
+struct cxgb_fcoe {
+       u8      flags;
+};
+
+int cxgb_fcoe_enable(struct net_device *);
+int cxgb_fcoe_disable(struct net_device *);
+bool cxgb_fcoe_sof_eof_supported(struct adapter *, struct sk_buff *);
+
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+#endif /* __CXGB4_FCOE_H__ */
index d92995138f7ef9253c3b2ac37efba2c491e2f528..58c537f16763e6c9fc7ea3faf8d5c3fa0f3af35a 100644 (file)
@@ -124,7 +124,7 @@ struct filter_entry {
 /* Macros needed to support the PCI Device ID Table ...
  */
 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
-       static struct pci_device_id cxgb4_pci_tbl[] = {
+       static const struct pci_device_id cxgb4_pci_tbl[] = {
 #define CH_PCI_DEVICE_ID_FUNCTION 0x4
 
 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
@@ -972,6 +972,28 @@ static void enable_rx(struct adapter *adap)
        }
 }
 
+static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
+                          unsigned int nq, unsigned int per_chan, int msi_idx,
+                          u16 *ids)
+{
+       int i, err;
+
+       for (i = 0; i < nq; i++, q++) {
+               if (msi_idx > 0)
+                       msi_idx++;
+               err = t4_sge_alloc_rxq(adap, &q->rspq, false,
+                                      adap->port[i / per_chan],
+                                      msi_idx, q->fl.size ? &q->fl : NULL,
+                                      uldrx_handler);
+               if (err)
+                       return err;
+               memset(&q->stats, 0, sizeof(q->stats));
+               if (ids)
+                       ids[i] = q->rspq.abs_id;
+       }
+       return 0;
+}
+
 /**
  *     setup_sge_queues - configure SGE Tx/Rx/response queues
  *     @adap: the adapter
@@ -1046,51 +1068,27 @@ freeout:        t4_free_sge_resources(adap);
 
        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
-               struct sge_ofld_rxq *q = &s->ofldrxq[i];
-               struct net_device *dev = adap->port[i / j];
-
-               if (msi_idx > 0)
-                       msi_idx++;
-               err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
-                                      q->fl.size ? &q->fl : NULL,
-                                      uldrx_handler);
-               if (err)
-                       goto freeout;
-               memset(&q->stats, 0, sizeof(q->stats));
-               s->ofld_rxq[i] = q->rspq.abs_id;
-               err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
+               err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
+                                           adap->port[i / j],
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }
 
-       for_each_rdmarxq(s, i) {
-               struct sge_ofld_rxq *q = &s->rdmarxq[i];
+#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
+       err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
+       if (err) \
+               goto freeout; \
+       if (msi_idx > 0) \
+               msi_idx += nq; \
+} while (0)
 
-               if (msi_idx > 0)
-                       msi_idx++;
-               err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
-                                      msi_idx, q->fl.size ? &q->fl : NULL,
-                                      uldrx_handler);
-               if (err)
-                       goto freeout;
-               memset(&q->stats, 0, sizeof(q->stats));
-               s->rdma_rxq[i] = q->rspq.abs_id;
-       }
+       ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
+       ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
+       j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
+       ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
 
-       for_each_rdmaciq(s, i) {
-               struct sge_ofld_rxq *q = &s->rdmaciq[i];
-
-               if (msi_idx > 0)
-                       msi_idx++;
-               err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
-                                      msi_idx, q->fl.size ? &q->fl : NULL,
-                                      uldrx_handler);
-               if (err)
-                       goto freeout;
-               memset(&q->stats, 0, sizeof(q->stats));
-               s->rdma_ciq[i] = q->rspq.abs_id;
-       }
+#undef ALLOC_OFLD_RXQS
 
        for_each_port(adap, i) {
                /*
@@ -1301,6 +1299,10 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                        txq = 0;
                } else {
                        txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+#ifdef CONFIG_CHELSIO_T4_FCOE
+                       if (skb->protocol == htons(ETH_P_FCOE))
+                               txq = skb->priority & 0x7;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
                }
                return txq;
        }
@@ -4601,6 +4603,10 @@ static const struct net_device_ops cxgb4_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller  = cxgb_netpoll,
 #endif
+#ifdef CONFIG_CHELSIO_T4_FCOE
+       .ndo_fcoe_enable      = cxgb_fcoe_enable,
+       .ndo_fcoe_disable     = cxgb_fcoe_disable,
+#endif /* CONFIG_CHELSIO_T4_FCOE */
 #ifdef CONFIG_NET_RX_BUSY_POLL
        .ndo_busy_poll        = cxgb_busy_poll,
 #endif
@@ -5416,7 +5422,7 @@ static int adap_init0(struct adapter *adap)
                adap->tids.stid_base = val[1];
                adap->tids.nstids = val[2] - val[1] + 1;
                /*
-                * Setup server filter region. Divide the availble filter
+                * Setup server filter region. Divide the available filter
                 * region into two parts. Regular filters get 1/3rd and server
                 * filters get 2/3rd part. This is only enabled if workarond
                 * path is enabled.
@@ -5758,7 +5764,16 @@ static void cfg_queues(struct adapter *adap)
                        s->ofldqsets = adap->params.nports;
                /* For RDMA one Rx queue per channel suffices */
                s->rdmaqs = adap->params.nports;
-               s->rdmaciqs = adap->params.nports;
+               /* Try and allow at least 1 CIQ per cpu rounding down
+                * to the number of ports, with a minimum of 1 per port.
+                * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
+                * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
+                * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
+                */
+               s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
+               s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
+                               adap->params.nports;
+               s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
        }
 
        for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -5844,12 +5859,17 @@ static void reduce_ethqs(struct adapter *adap, int n)
 static int enable_msix(struct adapter *adap)
 {
        int ofld_need = 0;
-       int i, want, need;
+       int i, want, need, allocated;
        struct sge *s = &adap->sge;
        unsigned int nchan = adap->params.nports;
-       struct msix_entry entries[MAX_INGQ + 1];
+       struct msix_entry *entries;
 
-       for (i = 0; i < ARRAY_SIZE(entries); ++i)
+       entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
+                         GFP_KERNEL);
+       if (!entries)
+               return -ENOMEM;
+
+       for (i = 0; i < MAX_INGQ + 1; ++i)
                entries[i].entry = i;
 
        want = s->max_ethqsets + EXTRA_VECS;
@@ -5866,29 +5886,39 @@ static int enable_msix(struct adapter *adap)
 #else
        need = adap->params.nports + EXTRA_VECS + ofld_need;
 #endif
-       want = pci_enable_msix_range(adap->pdev, entries, need, want);
-       if (want < 0)
-               return want;
+       allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
+       if (allocated < 0) {
+               dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
+                        " not using MSI-X\n");
+               kfree(entries);
+               return allocated;
+       }
 
-       /*
-        * Distribute available vectors to the various queue groups.
+       /* Distribute available vectors to the various queue groups.
         * Every group gets its minimum requirement and NIC gets top
         * priority for leftovers.
         */
-       i = want - EXTRA_VECS - ofld_need;
+       i = allocated - EXTRA_VECS - ofld_need;
        if (i < s->max_ethqsets) {
                s->max_ethqsets = i;
                if (i < s->ethqsets)
                        reduce_ethqs(adap, i);
        }
        if (is_offload(adap)) {
-               i = want - EXTRA_VECS - s->max_ethqsets;
-               i -= ofld_need - nchan;
+               if (allocated < want) {
+                       s->rdmaqs = nchan;
+                       s->rdmaciqs = nchan;
+               }
+
+               /* leftovers go to OFLD */
+               i = allocated - EXTRA_VECS - s->max_ethqsets -
+                   s->rdmaqs - s->rdmaciqs;
                s->ofldqsets = (i / nchan) * nchan;  /* round down */
        }
-       for (i = 0; i < want; ++i)
+       for (i = 0; i < allocated; ++i)
                adap->msix_info[i].vec = entries[i].vector;
 
+       kfree(entries);
        return 0;
 }
 
index b688b32c21fe530aa0cd48b61323bca3ed269b53..c438f3895c40b2b9c96f5152de4bd9fcc950a76f 100644 (file)
@@ -46,6 +46,9 @@
 #ifdef CONFIG_NET_RX_BUSY_POLL
 #include <net/busy_poll.h>
 #endif /* CONFIG_NET_RX_BUSY_POLL */
+#ifdef CONFIG_CHELSIO_T4_FCOE
+#include <scsi/fc/fc_fcoe.h>
+#endif /* CONFIG_CHELSIO_T4_FCOE */
 #include "cxgb4.h"
 #include "t4_regs.h"
 #include "t4_values.h"
@@ -1044,6 +1047,38 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
                q->pidx -= q->size;
 }
 
+#ifdef CONFIG_CHELSIO_T4_FCOE
+static inline int
+cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
+                 const struct port_info *pi, u64 *cntrl)
+{
+       const struct cxgb_fcoe *fcoe = &pi->fcoe;
+
+       if (!(fcoe->flags & CXGB_FCOE_ENABLED))
+               return 0;
+
+       if (skb->protocol != htons(ETH_P_FCOE))
+               return 0;
+
+       skb_reset_mac_header(skb);
+       skb->mac_len = sizeof(struct ethhdr);
+
+       skb_set_network_header(skb, skb->mac_len);
+       skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
+
+       if (!cxgb_fcoe_sof_eof_supported(adap, skb))
+               return -ENOTSUPP;
+
+       /* FC CRC offload */
+       *cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) |
+                    TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS |
+                    TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) |
+                    TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) |
+                    TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END);
+       return 0;
+}
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+
 /**
  *     t4_eth_xmit - add a packet to an Ethernet Tx queue
  *     @skb: the packet
@@ -1066,6 +1101,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        const struct skb_shared_info *ssi;
        dma_addr_t addr[MAX_SKB_FRAGS + 1];
        bool immediate = false;
+#ifdef CONFIG_CHELSIO_T4_FCOE
+       int err;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
 
        /*
         * The chip min packet length is 10 octets but play safe and reject
@@ -1082,6 +1120,13 @@ out_free:        dev_kfree_skb_any(skb);
        q = &adap->sge.ethtxq[qidx + pi->first_qset];
 
        reclaim_completed_tx(adap, &q->q, true);
+       cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+
+#ifdef CONFIG_CHELSIO_T4_FCOE
+       err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
+       if (unlikely(err == -ENOTSUPP))
+               goto out_free;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
 
        flits = calc_tx_flits(skb);
        ndesc = flits_to_desc(flits);
@@ -1153,13 +1198,17 @@ out_free:       dev_kfree_skb_any(skb);
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
                        q->tx_cso++;
-               } else
-                       cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+               }
        }
 
        if (skb_vlan_tag_present(skb)) {
                q->vlan_ins++;
                cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+#ifdef CONFIG_CHELSIO_T4_FCOE
+               if (skb->protocol == htons(ETH_P_FCOE))
+                       cntrl |= TXPKT_VLAN(
+                                ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
+#endif /* CONFIG_CHELSIO_T4_FCOE */
        }
 
        cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
@@ -1759,6 +1808,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
        struct sge *s = &q->adap->sge;
        int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
                            CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
+#ifdef CONFIG_CHELSIO_T4_FCOE
+       struct port_info *pi;
+#endif
 
        if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
                return handle_trace_pkt(q->adap, si);
@@ -1799,8 +1851,24 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                        skb->ip_summed = CHECKSUM_COMPLETE;
                        rxq->stats.rx_cso++;
                }
-       } else
+       } else {
                skb_checksum_none_assert(skb);
+#ifdef CONFIG_CHELSIO_T4_FCOE
+#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
+                         RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
+
+               pi = netdev_priv(skb->dev);
+               if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
+                       if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
+                           (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
+                               if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F)))
+                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       }
+               }
+
+#undef CPL_RX_PKT_FLAGS
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+       }
 
        if (unlikely(pkt->vlan_ex)) {
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
index ee394dc68303851153a3598fe4455cec972597d9..5ed8db977432710198c2bc3c01d94d23ff868095 100644 (file)
@@ -867,7 +867,7 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
  *     Read the specified number of 32-bit words from the serial flash.
  *     If @byte_oriented is set the read data is stored as a byte array
  *     (i.e., big-endian), otherwise as 32-bit words in the platform's
- *     natural endianess.
+ *     natural endianness.
  */
 int t4_read_flash(struct adapter *adapter, unsigned int addr,
                  unsigned int nwords, u32 *data, int byte_oriented)
@@ -3558,7 +3558,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
         * For the single-MTU buffers in unpacked mode we need to include
         * space for the SGE Control Packet Shift, 14 byte Ethernet header,
         * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
-        * Padding boundry.  All of these are accommodated in the Factory
+        * Padding boundary.  All of these are accommodated in the Factory
         * Default Firmware Configuration File but we need to adjust it for
         * this host's cache line size.
         */
@@ -4582,7 +4582,7 @@ int t4_init_tp_params(struct adapter *adap)
                                                               PROTOCOL_F);
 
        /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
-        * represents the presense of an Outer VLAN instead of a VNIC ID.
+        * represents the presence of an Outer VLAN instead of a VNIC ID.
         */
        if ((adap->params.tp.ingress_config & VNIC_F) == 0)
                adap->params.tp.vnic_shift = -1;
index 0fb975e258b35b08af3f43a8dbadbd1752d7dea5..30a2f56e99c297c735ec3af54de0fbf1eb0a2ed0 100644 (file)
@@ -794,6 +794,14 @@ struct cpl_rx_pkt {
        __be16 err_vec;
 };
 
+#define RXF_PSH_S    20
+#define RXF_PSH_V(x) ((x) << RXF_PSH_S)
+#define RXF_PSH_F    RXF_PSH_V(1U)
+
+#define RXF_SYN_S    21
+#define RXF_SYN_V(x) ((x) << RXF_SYN_S)
+#define RXF_SYN_F    RXF_SYN_V(1U)
+
 #define RXF_UDP_S    22
 #define RXF_UDP_V(x) ((x) << RXF_UDP_S)
 #define RXF_UDP_F    RXF_UDP_V(1U)
@@ -810,6 +818,18 @@ struct cpl_rx_pkt {
 #define RXF_IP6_V(x) ((x) << RXF_IP6_S)
 #define RXF_IP6_F    RXF_IP6_V(1U)
 
+#define RXF_SYN_COOKIE_S    26
+#define RXF_SYN_COOKIE_V(x) ((x) << RXF_SYN_COOKIE_S)
+#define RXF_SYN_COOKIE_F    RXF_SYN_COOKIE_V(1U)
+
+#define RXF_FCOE_S    26
+#define RXF_FCOE_V(x) ((x) << RXF_FCOE_S)
+#define RXF_FCOE_F    RXF_FCOE_V(1U)
+
+#define RXF_LRO_S    27
+#define RXF_LRO_V(x) ((x) << RXF_LRO_S)
+#define RXF_LRO_F    RXF_LRO_V(1U)
+
 /* rx_pkt.l2info fields */
 #define RX_ETHHDR_LEN_S    0
 #define RX_ETHHDR_LEN_M    0x1F
@@ -846,6 +866,11 @@ struct cpl_rx_pkt {
 #define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S)
 #define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M)
 
+/* rx_pkt.err_vec fields */
+#define RXERR_CSUM_S    13
+#define RXERR_CSUM_V(x) ((x) << RXERR_CSUM_S)
+#define RXERR_CSUM_F    RXERR_CSUM_V(1U)
+
 struct cpl_trace_pkt {
        u8 opcode;
        u8 intf;
index ddfb5b846045d0e156c4287b2edb50c6d250b40f..1a9a6f334d2d798e45ddaa88438b852a74c7a8d6 100644 (file)
@@ -60,8 +60,6 @@
  *   -- Used to finish the definition of the PCI ID Table.  Note that we
  *   -- will be adding a trailing semi-colon (";") here.
  */
-#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
-
 #ifndef CH_PCI_DEVICE_ID_FUNCTION
 #error CH_PCI_DEVICE_ID_FUNCTION not defined!
 #endif
@@ -154,8 +152,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
        CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
        CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
-#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
-
 #endif /* __T4_PCI_ID_TBL_H__ */
index a4a19e0ec7f5d76590f0e73641d5e03afbcde43a..03fbfd1fb3dff35f5cef20f84a574df09e285116 100644 (file)
@@ -36,7 +36,7 @@
 #define _T4FW_INTERFACE_H_
 
 enum fw_retval {
-       FW_SUCCESS              = 0,    /* completed sucessfully */
+       FW_SUCCESS              = 0,    /* completed successfully */
        FW_EPERM                = 1,    /* operation not permitted */
        FW_ENOENT               = 2,    /* no such file or directory */
        FW_EIO                  = 5,    /* input/output error; hw bad */
index 122e2964e63b757f9b73e35781fca71f2ee2ef56..1d893b0b7ddfde0b4db9083176c5b636285f023a 100644 (file)
@@ -3034,7 +3034,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
 /* Macros needed to support the PCI Device ID Table ...
  */
 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
-       static struct pci_device_id cxgb4vf_pci_tbl[] = {
+       static const struct pci_device_id cxgb4vf_pci_tbl[] = {
 #define CH_PCI_DEVICE_ID_FUNCTION      0x8
 
 #define CH_PCI_ID_TABLE_ENTRY(devid) \
index e0d711071afb7d6d80763666141477fb3dc4c479..7715982230e511285eea81432086ef31330c9b81 100644 (file)
@@ -875,7 +875,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
         * Write Header (incorporated as part of the cpl_tx_pkt_lso and
         * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
         * message or, if we're doing a Large Send Offload, an LSO CPL message
-        * with an embeded TX Packet Write CPL message.
+        * with an embedded TX Packet Write CPL message.
         */
        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
        if (skb_shinfo(skb)->gso_size)
index 280b4a21584934f56fbcc24399dfd0cf5feb694e..966ee900ed00bdad79d24fdd6653665230272e87 100644 (file)
@@ -339,7 +339,7 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
  *      @adapter: the adapter
  *
  *     Issues a reset command to FW.  For a Physical Function this would
- *     result in the Firmware reseting all of its state.  For a Virtual
+ *     result in the Firmware resetting all of its state.  For a Virtual
  *     Function this just resets the state associated with the VF.
  */
 int t4vf_fw_reset(struct adapter *adapter)
index d1c025fd972607eaedbe51c86f157aef54456410..60383040d6c663ae8293234ca8de4aef5527d84e 100644 (file)
@@ -1578,7 +1578,7 @@ out1:
 
 #ifndef CONFIG_CS89x0_PLATFORM
 /*
- * This function converts the I/O port addres used by the cs89x0_probe() and
+ * This function converts the I/O port address used by the cs89x0_probe() and
  * init_module() functions to the I/O memory address used by the
  * cs89x0_probe1() function.
  */
index a5179bfcdc2c1b6124a92f33f9c5f57363751a02..204bd182473bceaaabaa5b1eba5ed618de751808 100644 (file)
@@ -893,7 +893,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
                } else {
                        memset(pp, 0, sizeof(*pp));
                        if (vf == PORT_SELF_VF)
-                               memset(netdev->dev_addr, 0, ETH_ALEN);
+                               eth_zero_addr(netdev->dev_addr);
                }
        } else {
                /* Set flag to indicate that the port assoc/disassoc
@@ -903,14 +903,14 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
 
                /* If DISASSOCIATE, clean up all assigned/saved macaddresses */
                if (pp->request == PORT_REQUEST_DISASSOCIATE) {
-                       memset(pp->mac_addr, 0, ETH_ALEN);
+                       eth_zero_addr(pp->mac_addr);
                        if (vf == PORT_SELF_VF)
-                               memset(netdev->dev_addr, 0, ETH_ALEN);
+                               eth_zero_addr(netdev->dev_addr);
                }
        }
 
        if (vf == PORT_SELF_VF)
-               memset(pp->vf_mac, 0, ETH_ALEN);
+               eth_zero_addr(pp->vf_mac);
 
        return err;
 }
index 50a00777228e12b91b33f8bb3b7794f4f07de42c..afd8e78e024e3d2cb6015202d3678210334358ff 100644 (file)
@@ -653,7 +653,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
        if ( !(db->media_mode & DMFE_AUTO) )
                db->op_mode = db->media_mode;   /* Force Mode */
 
-       /* Initialize Transmit/Receive decriptor and CR3/4 */
+       /* Initialize Transmit/Receive descriptor and CR3/4 */
        dmfe_descriptor_init(dev);
 
        /* Init CR6 to program DM910x operation */
index 1c5916b13778a96e489ee3ec1bcb2d1acee63cd2..2c30c0c83f984a2d41204c637bb9f2dbe797bc00 100644 (file)
@@ -564,7 +564,7 @@ static void uli526x_init(struct net_device *dev)
        if ( !(db->media_mode & ULI526X_AUTO) )
                db->op_mode = db->media_mode;           /* Force Mode */
 
-       /* Initialize Transmit/Receive decriptor and CR3/4 */
+       /* Initialize Transmit/Receive descriptor and CR3/4 */
        uli526x_descriptor_init(dev, ioaddr);
 
        /* Init CR6 to program M526X operation */
index 27b9fe99a9bdfa4eddb70aaa9749eaef6bdd3518..4b0494b9cc7cf034e8ebdc190d08e46a8a1e790e 100644 (file)
 #include <linux/firmware.h>
 #include <linux/slab.h>
 #include <linux/u64_stats_sync.h>
+#include <linux/cpumask.h>
 
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER                        "10.4u"
+#define DRV_VER                        "10.6.0.1"
 #define DRV_NAME               "be2net"
 #define BE_NAME                        "Emulex BladeEngine2"
 #define BE3_NAME               "Emulex BladeEngine3"
@@ -87,6 +88,7 @@
 #define BE3_MAX_EVT_QS         16
 #define BE3_SRIOV_MAX_EVT_QS   8
 
+#define MAX_RSS_IFACES         15
 #define MAX_RX_QS              32
 #define MAX_EVT_QS             32
 #define MAX_TX_QS              32
@@ -182,6 +184,7 @@ struct be_eq_obj {
        u16 spurious_intr;
        struct napi_struct napi;
        struct be_adapter *adapter;
+       cpumask_var_t  affinity_mask;
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 #define BE_EQ_IDLE             0
@@ -238,10 +241,17 @@ struct be_tx_stats {
        struct u64_stats_sync sync_compl;
 };
 
+/* Structure to hold some data of interest obtained from a TX CQE */
+struct be_tx_compl_info {
+       u8 status;              /* Completion status */
+       u16 end_index;          /* Completed TXQ Index */
+};
+
 struct be_tx_obj {
        u32 db_offset;
        struct be_queue_info q;
        struct be_queue_info cq;
+       struct be_tx_compl_info txcp;
        /* Remember the skbs that were transmitted */
        struct sk_buff *sent_skb_list[TX_Q_LEN];
        struct be_tx_stats stats;
@@ -370,6 +380,7 @@ enum vf_state {
 #define BE_FLAGS_VXLAN_OFFLOADS                        BIT(8)
 #define BE_FLAGS_SETUP_DONE                    BIT(9)
 #define BE_FLAGS_EVT_INCOMPATIBLE_SFP          BIT(10)
+#define BE_FLAGS_ERR_DETECTION_SCHEDULED       BIT(11)
 
 #define BE_UC_PMAC_COUNT                       30
 #define BE_VF_UC_PMAC_COUNT                    2
@@ -404,8 +415,11 @@ struct be_resources {
        u16 max_tx_qs;
        u16 max_rss_qs;
        u16 max_rx_qs;
+       u16 max_cq_count;
        u16 max_uc_mac;         /* Max UC MACs programmable */
        u16 max_vlans;          /* Number of vlans supported */
+       u16 max_iface_count;
+       u16 max_mcc_count;
        u16 max_evt_qs;
        u32 if_cap_flags;
        u32 vf_if_cap_flags;    /* VF if capability flags */
@@ -418,6 +432,39 @@ struct rss_info {
        u8 rss_hkey[RSS_HASH_KEY_LEN];
 };
 
+/* Macros to read/write the 'features' word of be_wrb_params structure.
+ */
+#define        BE_WRB_F_BIT(name)                      BE_WRB_F_##name##_BIT
+#define        BE_WRB_F_MASK(name)                     BIT_MASK(BE_WRB_F_##name##_BIT)
+
+#define        BE_WRB_F_GET(word, name)        \
+       (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name))
+
+#define        BE_WRB_F_SET(word, name, val)   \
+       ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name)))
+
+/* Feature/offload bits */
+enum {
+       BE_WRB_F_CRC_BIT,               /* Ethernet CRC */
+       BE_WRB_F_IPCS_BIT,              /* IP csum */
+       BE_WRB_F_TCPCS_BIT,             /* TCP csum */
+       BE_WRB_F_UDPCS_BIT,             /* UDP csum */
+       BE_WRB_F_LSO_BIT,               /* LSO */
+       BE_WRB_F_LSO6_BIT,              /* LSO6 */
+       BE_WRB_F_VLAN_BIT,              /* VLAN */
+       BE_WRB_F_VLAN_SKIP_HW_BIT       /* Skip VLAN tag (workaround) */
+};
+
+/* The structure below provides a HW-agnostic abstraction of WRB params
+ * retrieved from a TX skb. This is in turn passed to chip specific routines
+ * during transmit, to set the corresponding params in the WRB.
+ */
+struct be_wrb_params {
+       u32 features;   /* Feature bits */
+       u16 vlan_tag;   /* VLAN tag */
+       u16 lso_mss;    /* MSS for LSO */
+};
+
 struct be_adapter {
        struct pci_dev *pdev;
        struct net_device *netdev;
@@ -449,6 +496,8 @@ struct be_adapter {
 
        /* Rx rings */
        u16 num_rx_qs;
+       u16 num_rss_qs;
+       u16 need_def_rxq;
        struct be_rx_obj rx_obj[MAX_RX_QS];
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */
 
@@ -463,7 +512,7 @@ struct be_adapter {
        struct delayed_work work;
        u16 work_counter;
 
-       struct delayed_work func_recovery_work;
+       struct delayed_work be_err_detection_work;
        u32 flags;
        u32 cmd_privileges;
        /* Ethtool knobs and info */
@@ -596,9 +645,8 @@ extern const struct ethtool_ops be_ethtool_ops;
        for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;  \
                i++, rxo++)
 
-/* Skip the default non-rss queue (last one)*/
 #define for_all_rss_queues(adapter, rxo, i)                            \
-       for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\
+       for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rss_qs; \
                i++, rxo++)
 
 #define for_all_tx_queues(adapter, txo, i)                             \
index 7f05f309e93596778851fb280fa4c8bd068828c3..fb140faeafb1cbda612cd11a9a1aac04e936c4a3 100644 (file)
@@ -635,73 +635,16 @@ static int lancer_wait_ready(struct be_adapter *adapter)
        for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
-                       break;
-
-               msleep(1000);
-       }
-
-       if (i == SLIPORT_READY_TIMEOUT)
-               return sliport_status ? : -1;
-
-       return 0;
-}
-
-static bool lancer_provisioning_error(struct be_adapter *adapter)
-{
-       u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
-
-       sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
-       if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
-               sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
-               sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
-
-               if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
-                   sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
-                       return true;
-       }
-       return false;
-}
-
-int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
-{
-       int status;
-       u32 sliport_status, err, reset_needed;
-       bool resource_error;
+                       return 0;
 
-       resource_error = lancer_provisioning_error(adapter);
-       if (resource_error)
-               return -EAGAIN;
+               if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
+                   !(sliport_status & SLIPORT_STATUS_RN_MASK))
+                       return -EIO;
 
-       status = lancer_wait_ready(adapter);
-       if (!status) {
-               sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
-               err = sliport_status & SLIPORT_STATUS_ERR_MASK;
-               reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
-               if (err && reset_needed) {
-                       iowrite32(SLI_PORT_CONTROL_IP_MASK,
-                                 adapter->db + SLIPORT_CONTROL_OFFSET);
-
-                       /* check if adapter has corrected the error */
-                       status = lancer_wait_ready(adapter);
-                       sliport_status = ioread32(adapter->db +
-                                                 SLIPORT_STATUS_OFFSET);
-                       sliport_status &= (SLIPORT_STATUS_ERR_MASK |
-                                               SLIPORT_STATUS_RN_MASK);
-                       if (status || sliport_status)
-                               status = -1;
-               } else if (err || reset_needed) {
-                       status = -1;
-               }
+               msleep(1000);
        }
-       /* Stop error recovery if error is not recoverable.
-        * No resource error is temporary errors and will go away
-        * when PF provisions resources.
-        */
-       resource_error = lancer_provisioning_error(adapter);
-       if (resource_error)
-               status = -EAGAIN;
 
-       return status;
+       return sliport_status ? : -1;
 }
 
 int be_fw_wait_ready(struct be_adapter *adapter)
@@ -720,6 +663,10 @@ int be_fw_wait_ready(struct be_adapter *adapter)
        }
 
        do {
+               /* There's no means to poll POST state on BE2/3 VFs */
+               if (BEx_chip(adapter) && be_virtfn(adapter))
+                       return 0;
+
                stage = be_POST_stage_get(adapter);
                if (stage == POST_STAGE_ARMFW_RDY)
                        return 0;
@@ -734,7 +681,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
 
 err:
        dev_err(dev, "POST timeout; stage=%#x\n", stage);
-       return -1;
+       return -ETIMEDOUT;
 }
 
 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
@@ -2123,16 +2070,12 @@ int be_cmd_reset_function(struct be_adapter *adapter)
        int status;
 
        if (lancer_chip(adapter)) {
+               iowrite32(SLI_PORT_CONTROL_IP_MASK,
+                         adapter->db + SLIPORT_CONTROL_OFFSET);
                status = lancer_wait_ready(adapter);
-               if (!status) {
-                       iowrite32(SLI_PORT_CONTROL_IP_MASK,
-                                 adapter->db + SLIPORT_CONTROL_OFFSET);
-                       status = lancer_test_and_set_rdy_state(adapter);
-               }
-               if (status) {
+               if (status)
                        dev_err(&adapter->pdev->dev,
                                "Adapter in non recoverable error\n");
-               }
                return status;
        }
 
@@ -3075,7 +3018,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
                mac_count = resp->true_mac_count + resp->pseudo_mac_count;
                /* Mac list returned could contain one or more active mac_ids
-                * or one or more true or pseudo permanant mac addresses.
+                * or one or more true or pseudo permanent mac addresses.
                 * If an active mac_id is present, return first active mac_id
                 * found.
                 */
@@ -3130,7 +3073,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
        int status;
        bool pmac_valid = false;
 
-       memset(mac, 0, ETH_ALEN);
+       eth_zero_addr(mac);
 
        if (BEx_chip(adapter)) {
                if (be_physfn(adapter))
@@ -3631,12 +3574,12 @@ static void be_copy_nic_desc(struct be_resources *res,
        res->max_rss_qs = le16_to_cpu(desc->rssq_count);
        res->max_rx_qs = le16_to_cpu(desc->rq_count);
        res->max_evt_qs = le16_to_cpu(desc->eq_count);
+       res->max_cq_count = le16_to_cpu(desc->cq_count);
+       res->max_iface_count = le16_to_cpu(desc->iface_count);
+       res->max_mcc_count = le16_to_cpu(desc->mcc_count);
        /* Clear flags that driver is not interested in */
        res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
                                BE_IF_CAP_FLAGS_WANT;
-       /* Need 1 RXQ as the default RXQ */
-       if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
-               res->max_rss_qs -= 1;
 }
 
 /* Uses Mbox */
@@ -3698,7 +3641,7 @@ err:
 
 /* Will use MBOX only if MCCQ has not been created */
 int be_cmd_get_profile_config(struct be_adapter *adapter,
-                             struct be_resources *res, u8 domain)
+                             struct be_resources *res, u8 query, u8 domain)
 {
        struct be_cmd_resp_get_profile_config *resp;
        struct be_cmd_req_get_profile_config *req;
@@ -3708,7 +3651,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        struct be_nic_res_desc *nic;
        struct be_mcc_wrb wrb = {0};
        struct be_dma_mem cmd;
-       u32 desc_count;
+       u16 desc_count;
        int status;
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
@@ -3727,12 +3670,19 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                req->hdr.version = 1;
        req->type = ACTIVE_PROFILE_TYPE;
 
+       /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
+        * descriptors with all bits set to "1" for the fields which can be
+        * modified using SET_PROFILE_CONFIG cmd.
+        */
+       if (query == RESOURCE_MODIFIABLE)
+               req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
+
        status = be_cmd_notify_wait(adapter, &wrb);
        if (status)
                goto err;
 
        resp = cmd.va;
-       desc_count = le32_to_cpu(resp->desc_count);
+       desc_count = le16_to_cpu(resp->desc_count);
 
        pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
                                desc_count);
@@ -3857,23 +3807,80 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
                                         1, version, domain);
 }
 
+static void be_fill_vf_res_template(struct be_adapter *adapter,
+                                   struct be_resources pool_res,
+                                   u16 num_vfs, u16 num_vf_qs,
+                                   struct be_nic_res_desc *nic_vft)
+{
+       u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
+       struct be_resources res_mod = {0};
+
+       /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
+        * which are modifiable using SET_PROFILE_CONFIG cmd.
+        */
+       be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
+
+       /* If RSS IFACE capability flags are modifiable for a VF, set the
+        * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
+        * more than 1 RSSQ is available for a VF.
+        * Otherwise, provision only 1 queue pair for VF.
+        */
+       if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+               nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+               if (num_vf_qs > 1) {
+                       vf_if_cap_flags |= BE_IF_FLAGS_RSS;
+                       if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
+                               vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
+               } else {
+                       vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
+                                            BE_IF_FLAGS_DEFQ_RSS);
+               }
+
+               nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
+       } else {
+               num_vf_qs = 1;
+       }
+
+       nic_vft->rq_count = cpu_to_le16(num_vf_qs);
+       nic_vft->txq_count = cpu_to_le16(num_vf_qs);
+       nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
+       nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
+                                       (num_vfs + 1));
+
+       /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
+        * among the PF and it's VFs, if the fields are changeable
+        */
+       if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
+               nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
+                                                        (num_vfs + 1));
+
+       if (res_mod.max_vlans == FIELD_MODIFIABLE)
+               nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
+                                                 (num_vfs + 1));
+
+       if (res_mod.max_iface_count == FIELD_MODIFIABLE)
+               nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
+                                                  (num_vfs + 1));
+
+       if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
+               nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
+                                                (num_vfs + 1));
+}
+
 int be_cmd_set_sriov_config(struct be_adapter *adapter,
-                           struct be_resources res, u16 num_vfs)
+                           struct be_resources pool_res, u16 num_vfs,
+                           u16 num_vf_qs)
 {
        struct {
                struct be_pcie_res_desc pcie;
                struct be_nic_res_desc nic_vft;
        } __packed desc;
-       u16 vf_q_count;
-
-       if (BEx_chip(adapter) || lancer_chip(adapter))
-               return 0;
 
        /* PF PCIE descriptor */
        be_reset_pcie_desc(&desc.pcie);
        desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
        desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
-       desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+       desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
        desc.pcie.pf_num = adapter->pdev->devfn;
        desc.pcie.sriov_state = num_vfs ? 1 : 0;
        desc.pcie.num_vfs = cpu_to_le16(num_vfs);
@@ -3882,32 +3889,12 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter,
        be_reset_nic_desc(&desc.nic_vft);
        desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
        desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
-       desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
-                               (1 << NOSV_SHIFT);
+       desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
        desc.nic_vft.pf_num = adapter->pdev->devfn;
        desc.nic_vft.vf_num = 0;
 
-       if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
-               /* If number of VFs requested is 8 less than max supported,
-                * assign 8 queue pairs to the PF and divide the remaining
-                * resources evenly among the VFs
-                */
-               if (num_vfs < (be_max_vfs(adapter) - 8))
-                       vf_q_count = (res.max_rss_qs - 8) / num_vfs;
-               else
-                       vf_q_count = res.max_rss_qs / num_vfs;
-
-               desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
-               desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
-               desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
-               desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
-       } else {
-               desc.nic_vft.txq_count = cpu_to_le16(1);
-               desc.nic_vft.rq_count = cpu_to_le16(1);
-               desc.nic_vft.rssq_count = cpu_to_le16(0);
-               /* One CQ for each TX, RX and MCCQ */
-               desc.nic_vft.cq_count = cpu_to_le16(3);
-       }
+       be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
+                               &desc.nic_vft);
 
        return be_cmd_set_profile_config(adapter, &desc,
                                         2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
index a7634a3f052ac02786baa7e63e764d86262720a9..1ec22300e2542f3f2382c8830153f917f19f6d35 100644 (file)
@@ -588,14 +588,15 @@ enum be_if_flags {
        BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
        BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
        BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
-       BE_IF_FLAGS_MULTICAST = 0x1000
+       BE_IF_FLAGS_MULTICAST = 0x1000,
+       BE_IF_FLAGS_DEFQ_RSS = 0x1000000
 };
 
 #define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
                         BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
                         BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
                         BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
-                        BE_IF_FLAGS_UNTAGGED)
+                        BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_DEFQ_RSS)
 
 #define BE_IF_FLAGS_ALL_PROMISCUOUS    (BE_IF_FLAGS_PROMISCUOUS | \
                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |\
@@ -2021,6 +2022,7 @@ struct be_cmd_req_set_ext_fat_caps {
 #define PORT_RESOURCE_DESC_TYPE_V1             0x55
 #define MAX_RESOURCE_DESC                      264
 
+#define IF_CAPS_FLAGS_VALID_SHIFT              0       /* IF caps valid */
 #define VFT_SHIFT                              3       /* VF template */
 #define IMM_SHIFT                              6       /* Immediate */
 #define NOSV_SHIFT                             7       /* No save */
@@ -2131,20 +2133,28 @@ struct be_cmd_resp_get_func_config {
        u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
 };
 
-#define ACTIVE_PROFILE_TYPE                    0x2
+enum {
+       RESOURCE_LIMITS,
+       RESOURCE_MODIFIABLE
+};
+
 struct be_cmd_req_get_profile_config {
        struct be_cmd_req_hdr hdr;
        u8 rsvd;
+#define ACTIVE_PROFILE_TYPE                    0x2
+#define QUERY_MODIFIABLE_FIELDS_TYPE           BIT(3)
        u8 type;
        u16 rsvd1;
 };
 
 struct be_cmd_resp_get_profile_config {
        struct be_cmd_resp_hdr hdr;
-       u32 desc_count;
+       __le16 desc_count;
+       u16 rsvd;
        u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
 };
 
+#define FIELD_MODIFIABLE                       0xFFFF
 struct be_cmd_req_set_profile_config {
        struct be_cmd_req_hdr hdr;
        u32 rsvd;
@@ -2344,7 +2354,7 @@ int be_cmd_query_port_name(struct be_adapter *adapter);
 int be_cmd_get_func_config(struct be_adapter *adapter,
                           struct be_resources *res);
 int be_cmd_get_profile_config(struct be_adapter *adapter,
-                             struct be_resources *res, u8 domain);
+                             struct be_resources *res, u8 query, u8 domain);
 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
                     int vf_num);
@@ -2355,4 +2365,5 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
 int be_cmd_set_sriov_config(struct be_adapter *adapter,
-                           struct be_resources res, u16 num_vfs);
+                           struct be_resources res, u16 num_vfs,
+                           u16 num_vf_qs);
index 4d2de47007692a85e1477da07b98402b489bc312..b765c24625bf523fd7932be17f6dfa22840a8e46 100644 (file)
@@ -1097,7 +1097,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
                return status;
 
        if (be_multi_rxq(adapter)) {
-               for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+               for (j = 0; j < 128; j += adapter->num_rss_qs) {
                        for_all_rss_queues(adapter, rxo, i) {
                                if ((j + i) >= 128)
                                        break;
index e6b790f0d9dc1ebfe6103a1549e84cf058522027..5ff7fba9b67c9d39043d1094193db714f7625a6b 100644 (file)
@@ -30,6 +30,9 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
 MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
+/* num_vfs module param is obsolete.
+ * Use sysfs method to enable/disable VFs.
+ */
 static unsigned int num_vfs;
 module_param(num_vfs, uint, S_IRUGO);
 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
@@ -727,48 +730,86 @@ static u16 skb_ip_proto(struct sk_buff *skb)
                ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
 }
 
-static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
-                        struct sk_buff *skb, u32 wrb_cnt, u32 len,
-                        bool skip_hw_vlan)
+static inline bool be_is_txq_full(struct be_tx_obj *txo)
 {
-       u16 vlan_tag, proto;
+       return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
+}
 
-       memset(hdr, 0, sizeof(*hdr));
+static inline bool be_can_txq_wake(struct be_tx_obj *txo)
+{
+       return atomic_read(&txo->q.used) < txo->q.len / 2;
+}
+
+static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
+{
+       return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
+}
 
-       SET_TX_WRB_HDR_BITS(crc, hdr, 1);
+static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
+                                      struct sk_buff *skb,
+                                      struct be_wrb_params *wrb_params)
+{
+       u16 proto;
 
        if (skb_is_gso(skb)) {
-               SET_TX_WRB_HDR_BITS(lso, hdr, 1);
-               SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
+               BE_WRB_F_SET(wrb_params->features, LSO, 1);
+               wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
-                       SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
+                       BE_WRB_F_SET(wrb_params->features, LSO6, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb->encapsulation) {
-                       SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
+                       BE_WRB_F_SET(wrb_params->features, IPCS, 1);
                        proto = skb_inner_ip_proto(skb);
                } else {
                        proto = skb_ip_proto(skb);
                }
                if (proto == IPPROTO_TCP)
-                       SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
+                       BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
                else if (proto == IPPROTO_UDP)
-                       SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
+                       BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
        }
 
        if (skb_vlan_tag_present(skb)) {
-               SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
-               vlan_tag = be_get_tx_vlan_tag(adapter, skb);
-               SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
+               BE_WRB_F_SET(wrb_params->features, VLAN, 1);
+               wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
        }
 
-       SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
-       SET_TX_WRB_HDR_BITS(len, hdr, len);
+       BE_WRB_F_SET(wrb_params->features, CRC, 1);
+}
+
+static void wrb_fill_hdr(struct be_adapter *adapter,
+                        struct be_eth_hdr_wrb *hdr,
+                        struct be_wrb_params *wrb_params,
+                        struct sk_buff *skb)
+{
+       memset(hdr, 0, sizeof(*hdr));
 
-       /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
-        * When this hack is not needed, the evt bit is set while ringing DB
+       SET_TX_WRB_HDR_BITS(crc, hdr,
+                           BE_WRB_F_GET(wrb_params->features, CRC));
+       SET_TX_WRB_HDR_BITS(ipcs, hdr,
+                           BE_WRB_F_GET(wrb_params->features, IPCS));
+       SET_TX_WRB_HDR_BITS(tcpcs, hdr,
+                           BE_WRB_F_GET(wrb_params->features, TCPCS));
+       SET_TX_WRB_HDR_BITS(udpcs, hdr,
+                           BE_WRB_F_GET(wrb_params->features, UDPCS));
+
+       SET_TX_WRB_HDR_BITS(lso, hdr,
+                           BE_WRB_F_GET(wrb_params->features, LSO));
+       SET_TX_WRB_HDR_BITS(lso6, hdr,
+                           BE_WRB_F_GET(wrb_params->features, LSO6));
+       SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
+
+       /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
+        * hack is not needed, the evt bit is set while ringing DB.
         */
-       if (skip_hw_vlan)
-               SET_TX_WRB_HDR_BITS(event, hdr, 1);
+       SET_TX_WRB_HDR_BITS(event, hdr,
+                           BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
+       SET_TX_WRB_HDR_BITS(vlan, hdr,
+                           BE_WRB_F_GET(wrb_params->features, VLAN));
+       SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
+
+       SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
+       SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
 }
 
 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -788,77 +829,124 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
        }
 }
 
-/* Returns the number of WRBs used up by the skb */
+/* Grab a WRB header for xmit */
+static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
+{
+       u16 head = txo->q.head;
+
+       queue_head_inc(&txo->q);
+       return head;
+}
+
+/* Set up the WRB header for xmit */
+static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
+                               struct be_tx_obj *txo,
+                               struct be_wrb_params *wrb_params,
+                               struct sk_buff *skb, u16 head)
+{
+       u32 num_frags = skb_wrb_cnt(skb);
+       struct be_queue_info *txq = &txo->q;
+       struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
+
+       wrb_fill_hdr(adapter, hdr, wrb_params, skb);
+       be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
+       BUG_ON(txo->sent_skb_list[head]);
+       txo->sent_skb_list[head] = skb;
+       txo->last_req_hdr = head;
+       atomic_add(num_frags, &txq->used);
+       txo->last_req_wrb_cnt = num_frags;
+       txo->pend_wrb_cnt += num_frags;
+}
+
+/* Setup a WRB fragment (buffer descriptor) for xmit */
+static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
+                                int len)
+{
+       struct be_eth_wrb *wrb;
+       struct be_queue_info *txq = &txo->q;
+
+       wrb = queue_head_node(txq);
+       wrb_fill(wrb, busaddr, len);
+       queue_head_inc(txq);
+}
+
+/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
+ * was invoked. The producer index is restored to the previous packet and the
+ * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
+ */
+static void be_xmit_restore(struct be_adapter *adapter,
+                           struct be_tx_obj *txo, u16 head, bool map_single,
+                           u32 copied)
+{
+       struct device *dev;
+       struct be_eth_wrb *wrb;
+       struct be_queue_info *txq = &txo->q;
+
+       dev = &adapter->pdev->dev;
+       txq->head = head;
+
+       /* skip the first wrb (hdr); it's not mapped */
+       queue_head_inc(txq);
+       while (copied) {
+               wrb = queue_head_node(txq);
+               unmap_tx_frag(dev, wrb, map_single);
+               map_single = false;
+               copied -= le32_to_cpu(wrb->frag_len);
+               queue_head_inc(txq);
+       }
+
+       txq->head = head;
+}
+
+/* Enqueue the given packet for transmit. This routine allocates WRBs for the
+ * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
+ * of WRBs used up by the packet.
+ */
 static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
-                          struct sk_buff *skb, bool skip_hw_vlan)
+                          struct sk_buff *skb,
+                          struct be_wrb_params *wrb_params)
 {
        u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
        struct device *dev = &adapter->pdev->dev;
        struct be_queue_info *txq = &txo->q;
-       struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
-       struct be_eth_wrb *wrb;
-       dma_addr_t busaddr;
        u16 head = txq->head;
+       dma_addr_t busaddr;
+       int len;
 
-       hdr = queue_head_node(txq);
-       wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
-       be_dws_cpu_to_le(hdr, sizeof(*hdr));
-
-       queue_head_inc(txq);
+       head = be_tx_get_wrb_hdr(txo);
 
        if (skb->len > skb->data_len) {
-               int len = skb_headlen(skb);
+               len = skb_headlen(skb);
 
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
-               wrb = queue_head_node(txq);
-               wrb_fill(wrb, busaddr, len);
-               queue_head_inc(txq);
+               be_tx_setup_wrb_frag(txo, busaddr, len);
                copied += len;
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               len = skb_frag_size(frag);
 
-               busaddr = skb_frag_dma_map(dev, frag, 0,
-                                          skb_frag_size(frag), DMA_TO_DEVICE);
+               busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
-               wrb = queue_head_node(txq);
-               wrb_fill(wrb, busaddr, skb_frag_size(frag));
-               queue_head_inc(txq);
-               copied += skb_frag_size(frag);
+               be_tx_setup_wrb_frag(txo, busaddr, len);
+               copied += len;
        }
 
-       BUG_ON(txo->sent_skb_list[head]);
-       txo->sent_skb_list[head] = skb;
-       txo->last_req_hdr = head;
-       atomic_add(wrb_cnt, &txq->used);
-       txo->last_req_wrb_cnt = wrb_cnt;
-       txo->pend_wrb_cnt += wrb_cnt;
+       be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
 
        be_tx_stats_update(txo, skb);
        return wrb_cnt;
 
 dma_err:
-       /* Bring the queue back to the state it was in before this
-        * routine was invoked.
-        */
-       txq->head = head;
-       /* skip the first wrb (hdr); it's not mapped */
-       queue_head_inc(txq);
-       while (copied) {
-               wrb = queue_head_node(txq);
-               unmap_tx_frag(dev, wrb, map_single);
-               map_single = false;
-               copied -= le32_to_cpu(wrb->frag_len);
-               adapter->drv_stats.dma_map_errors++;
-               queue_head_inc(txq);
-       }
-       txq->head = head;
+       adapter->drv_stats.dma_map_errors++;
+       be_xmit_restore(adapter, txo, head, map_single, copied);
        return 0;
 }
 
@@ -869,7 +957,8 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
 
 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
-                                            bool *skip_hw_vlan)
+                                            struct be_wrb_params
+                                            *wrb_params)
 {
        u16 vlan_tag = 0;
 
@@ -886,8 +975,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
                 * skip VLAN insertion
                 */
-               if (skip_hw_vlan)
-                       *skip_hw_vlan = true;
+               BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
        }
 
        if (vlan_tag) {
@@ -905,8 +993,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                                vlan_tag);
                if (unlikely(!skb))
                        return skb;
-               if (skip_hw_vlan)
-                       *skip_hw_vlan = true;
+               BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
        }
 
        return skb;
@@ -946,7 +1033,8 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
 
 static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
                                                  struct sk_buff *skb,
-                                                 bool *skip_hw_vlan)
+                                                 struct be_wrb_params
+                                                 *wrb_params)
 {
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
@@ -970,7 +1058,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
         */
        if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
-               *skip_hw_vlan = true;
+               BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
 
        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
@@ -978,7 +1066,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            skb_vlan_tag_present(skb)) {
-               skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
+               skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
                if (unlikely(!skb))
                        goto err;
        }
@@ -1000,7 +1088,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
-               skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
+               skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
                if (unlikely(!skb))
                        goto err;
        }
@@ -1014,7 +1102,7 @@ err:
 
 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
-                                          bool *skip_hw_vlan)
+                                          struct be_wrb_params *wrb_params)
 {
        /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
         * less may cause a transmit stall on that port. So the work-around is
@@ -1026,7 +1114,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
        }
 
        if (BEx_chip(adapter) || lancer_chip(adapter)) {
-               skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
+               skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
                if (!skb)
                        return NULL;
        }
@@ -1060,24 +1148,26 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
 
 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
-       bool skip_hw_vlan = false, flush = !skb->xmit_more;
        struct be_adapter *adapter = netdev_priv(netdev);
        u16 q_idx = skb_get_queue_mapping(skb);
        struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
-       struct be_queue_info *txq = &txo->q;
+       struct be_wrb_params wrb_params = { 0 };
+       bool flush = !skb->xmit_more;
        u16 wrb_cnt;
 
-       skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
+       skb = be_xmit_workarounds(adapter, skb, &wrb_params);
        if (unlikely(!skb))
                goto drop;
 
-       wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
+       be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
+
+       wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
        if (unlikely(!wrb_cnt)) {
                dev_kfree_skb_any(skb);
                goto drop;
        }
 
-       if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
+       if (be_is_txq_full(txo)) {
                netif_stop_subqueue(netdev, q_idx);
                tx_stats(txo)->tx_stops++;
        }
@@ -2042,18 +2132,23 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
        }
 }
 
-static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
+static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
 {
-       struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
+       struct be_queue_info *tx_cq = &txo->cq;
+       struct be_tx_compl_info *txcp = &txo->txcp;
+       struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
 
-       if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
+       if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;
 
+       /* Ensure load ordering of valid bit dword and other dwords below */
        rmb();
-       be_dws_le_to_cpu(txcp, sizeof(*txcp));
+       be_dws_le_to_cpu(compl, sizeof(*compl));
 
-       txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
+       txcp->status = GET_TX_COMPL_BITS(status, compl);
+       txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
 
+       compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
        queue_tail_inc(tx_cq);
        return txcp;
 }
@@ -2174,9 +2269,9 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 {
        u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
        struct device *dev = &adapter->pdev->dev;
-       struct be_tx_obj *txo;
+       struct be_tx_compl_info *txcp;
        struct be_queue_info *txq;
-       struct be_eth_tx_compl *txcp;
+       struct be_tx_obj *txo;
        int i, pending_txqs;
 
        /* Stop polling for compls when HW has been silent for 10ms */
@@ -2187,10 +2282,10 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
                        cmpl = 0;
                        num_wrbs = 0;
                        txq = &txo->q;
-                       while ((txcp = be_tx_compl_get(&txo->cq))) {
-                               end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
-                               num_wrbs += be_tx_compl_process(adapter, txo,
-                                                               end_idx);
+                       while ((txcp = be_tx_compl_get(txo))) {
+                               num_wrbs +=
+                                       be_tx_compl_process(adapter, txo,
+                                                           txcp->end_index);
                                cmpl++;
                        }
                        if (cmpl) {
@@ -2198,7 +2293,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
                                atomic_sub(num_wrbs, &txq->used);
                                timeo = 0;
                        }
-                       if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
+                       if (!be_is_tx_compl_pending(txo))
                                pending_txqs--;
                }
 
@@ -2247,6 +2342,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
                        napi_hash_del(&eqo->napi);
                        netif_napi_del(&eqo->napi);
                }
+               free_cpumask_var(eqo->affinity_mask);
                be_queue_free(adapter, &eqo->q);
        }
 }
@@ -2262,6 +2358,11 @@ static int be_evt_queues_create(struct be_adapter *adapter)
                                    adapter->cfg_num_qs);
 
        for_all_evt_queues(adapter, eqo, i) {
+               if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+                       return -ENOMEM;
+               cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
+                                           eqo->affinity_mask);
+
                netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
                               BE_NAPI_WEIGHT);
                napi_hash_add(&eqo->napi);
@@ -2353,8 +2454,9 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
 
 static int be_tx_qs_create(struct be_adapter *adapter)
 {
-       struct be_queue_info *cq, *eq;
+       struct be_queue_info *cq;
        struct be_tx_obj *txo;
+       struct be_eq_obj *eqo;
        int status, i;
 
        adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
@@ -2372,8 +2474,8 @@ static int be_tx_qs_create(struct be_adapter *adapter)
                /* If num_evt_qs is less than num_tx_qs, then more than
                 * one txq share an eq
                 */
-               eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
-               status = be_cmd_cq_create(adapter, cq, eq, false, 3);
+               eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
+               status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
                if (status)
                        return status;
 
@@ -2385,6 +2487,9 @@ static int be_tx_qs_create(struct be_adapter *adapter)
                status = be_cmd_txq_create(adapter, txo);
                if (status)
                        return status;
+
+               netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
+                                   eqo->idx);
        }
 
        dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
@@ -2413,13 +2518,19 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
        int rc, i;
 
        /* We can create as many RSS rings as there are EQs. */
-       adapter->num_rx_qs = adapter->num_evt_qs;
+       adapter->num_rss_qs = adapter->num_evt_qs;
+
+       /* We'll use RSS only if atleast 2 RSS rings are supported. */
+       if (adapter->num_rss_qs <= 1)
+               adapter->num_rss_qs = 0;
 
-       /* We'll use RSS only if atleast 2 RSS rings are supported.
-        * When RSS is used, we'll need a default RXQ for non-IP traffic.
+       adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
+
+       /* When the interface is not capable of RSS rings (and there is no
+        * need to create a default RXQ) we'll still need one RXQ
         */
-       if (adapter->num_rx_qs > 1)
-               adapter->num_rx_qs++;
+       if (adapter->num_rx_qs == 0)
+               adapter->num_rx_qs = 1;
 
        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
@@ -2438,8 +2549,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
        }
 
        dev_info(&adapter->pdev->dev,
-                "created %d RSS queue(s) and 1 default RX queue\n",
-                adapter->num_rx_qs - 1);
+                "created %d RX queue(s)\n", adapter->num_rx_qs);
        return 0;
 }
 
@@ -2549,7 +2659,7 @@ loop_continue:
        return work_done;
 }
 
-static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
+static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
 {
        switch (status) {
        case BE_TX_COMP_HDR_PARSE_ERR:
@@ -2564,7 +2674,7 @@ static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
        }
 }
 
-static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
+static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
 {
        switch (status) {
        case LANCER_TX_COMP_LSO_ERR:
@@ -2589,22 +2699,18 @@ static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
 static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
                          int idx)
 {
-       struct be_eth_tx_compl *txcp;
        int num_wrbs = 0, work_done = 0;
-       u32 compl_status;
-       u16 last_idx;
+       struct be_tx_compl_info *txcp;
 
-       while ((txcp = be_tx_compl_get(&txo->cq))) {
-               last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
-               num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
+       while ((txcp = be_tx_compl_get(txo))) {
+               num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
                work_done++;
 
-               compl_status = GET_TX_COMPL_BITS(status, txcp);
-               if (compl_status) {
+               if (txcp->status) {
                        if (lancer_chip(adapter))
-                               lancer_update_tx_err(txo, compl_status);
+                               lancer_update_tx_err(txo, txcp->status);
                        else
-                               be_update_tx_err(txo, compl_status);
+                               be_update_tx_err(txo, txcp->status);
                }
        }
 
@@ -2615,7 +2721,7 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
                /* As Tx wrbs have been freed up, wake up netdev queue
                 * if it was stopped due to lack of tx wrbs.  */
                if (__netif_subqueue_stopped(adapter->netdev, idx) &&
-                   atomic_read(&txo->q.used) < txo->q.len / 2) {
+                   be_can_txq_wake(txo)) {
                        netif_wake_subqueue(adapter->netdev, idx);
                }
 
@@ -2807,12 +2913,12 @@ void be_detect_error(struct be_adapter *adapter)
                        sliport_err2 = ioread32(adapter->db +
                                                SLIPORT_ERROR2_OFFSET);
                        adapter->hw_error = true;
+                       error_detected = true;
                        /* Do not log error messages if its a FW reset */
                        if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
                            sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
                                dev_info(dev, "Firmware update in progress\n");
                        } else {
-                               error_detected = true;
                                dev_err(dev, "Error detected in the card\n");
                                dev_err(dev, "ERR: sliport status 0x%x\n",
                                        sliport_status);
@@ -2932,6 +3038,8 @@ static int be_msix_register(struct be_adapter *adapter)
                status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
                if (status)
                        goto err_msix;
+
+               irq_set_affinity_hint(vec, eqo->affinity_mask);
        }
 
        return 0;
@@ -2976,7 +3084,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        struct be_eq_obj *eqo;
-       int i;
+       int i, vec;
 
        if (!adapter->isr_registered)
                return;
@@ -2988,8 +3096,11 @@ static void be_irq_unregister(struct be_adapter *adapter)
        }
 
        /* MSIx */
-       for_all_evt_queues(adapter, eqo, i)
-               free_irq(be_msix_vec_get(adapter, eqo), eqo);
+       for_all_evt_queues(adapter, eqo, i) {
+               vec = be_msix_vec_get(adapter, eqo);
+               irq_set_affinity_hint(vec, NULL);
+               free_irq(vec, eqo);
+       }
 
 done:
        adapter->isr_registered = false;
@@ -3071,12 +3182,14 @@ static int be_rx_qs_create(struct be_adapter *adapter)
                        return rc;
        }
 
-       /* The FW would like the default RXQ to be created first */
-       rxo = default_rxo(adapter);
-       rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
-                              adapter->if_handle, false, &rxo->rss_id);
-       if (rc)
-               return rc;
+       if (adapter->need_def_rxq || !adapter->num_rss_qs) {
+               rxo = default_rxo(adapter);
+               rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
+                                      rx_frag_size, adapter->if_handle,
+                                      false, &rxo->rss_id);
+               if (rc)
+                       return rc;
+       }
 
        for_all_rss_queues(adapter, rxo, i) {
                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
@@ -3087,8 +3200,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)
        }
 
        if (be_multi_rxq(adapter)) {
-               for (j = 0; j < RSS_INDIR_TABLE_LEN;
-                       j += adapter->num_rx_qs - 1) {
+               for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
                        for_all_rss_queues(adapter, rxo, i) {
                                if ((j + i) >= RSS_INDIR_TABLE_LEN)
                                        break;
@@ -3179,7 +3291,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
        int status = 0;
        u8 mac[ETH_ALEN];
 
-       memset(mac, 0, ETH_ALEN);
+       eth_zero_addr(mac);
 
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
@@ -3324,6 +3436,14 @@ static void be_cancel_worker(struct be_adapter *adapter)
        }
 }
 
+static void be_cancel_err_detection(struct be_adapter *adapter)
+{
+       if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
+               cancel_delayed_work_sync(&adapter->be_err_detection_work);
+               adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
+       }
+}
+
 static void be_mac_clear(struct be_adapter *adapter)
 {
        if (adapter->pmac_id) {
@@ -3355,8 +3475,39 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
 }
 #endif
 
+static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
+{
+       struct be_resources res = adapter->pool_res;
+       u16 num_vf_qs = 1;
+
+       /* Distribute the queue resources equally among the PF and it's VFs
+        * Do not distribute queue resources in multi-channel configuration.
+        */
+       if (num_vfs && !be_is_mc(adapter)) {
+               /* If number of VFs requested is 8 less than max supported,
+                * assign 8 queue pairs to the PF and divide the remaining
+                * resources evenly among the VFs
+                */
+               if (num_vfs < (be_max_vfs(adapter) - 8))
+                       num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
+               else
+                       num_vf_qs = res.max_rss_qs / num_vfs;
+
+               /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
+                * interfaces per port. Provide RSS on VFs, only if number
+                * of VFs requested is less than MAX_RSS_IFACES limit.
+                */
+               if (num_vfs >= MAX_RSS_IFACES)
+                       num_vf_qs = 1;
+       }
+       return num_vf_qs;
+}
+
 static int be_clear(struct be_adapter *adapter)
 {
+       struct pci_dev *pdev = adapter->pdev;
+       u16 num_vf_qs;
+
        be_cancel_worker(adapter);
 
        if (sriov_enabled(adapter))
@@ -3365,9 +3516,14 @@ static int be_clear(struct be_adapter *adapter)
        /* Re-configure FW to distribute resources evenly across max-supported
         * number of VFs, only when VFs are not already enabled.
         */
-       if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
+       if (skyhawk_chip(adapter) && be_physfn(adapter) &&
+           !pci_vfs_assigned(pdev)) {
+               num_vf_qs = be_calculate_vf_qs(adapter,
+                                              pci_sriov_get_totalvfs(pdev));
                be_cmd_set_sriov_config(adapter, adapter->pool_res,
-                                       pci_sriov_get_totalvfs(adapter->pdev));
+                                       pci_sriov_get_totalvfs(pdev),
+                                       num_vf_qs);
+       }
 
 #ifdef CONFIG_BE2NET_VXLAN
        be_disable_vxlan_offloads(adapter);
@@ -3391,7 +3547,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
 
        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
-                  BE_IF_FLAGS_RSS;
+                  BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
 
        en_flags &= cap_flags;
 
@@ -3412,6 +3568,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
        for_all_vfs(adapter, vf_cfg, vf) {
                if (!BE3_chip(adapter)) {
                        status = be_cmd_get_profile_config(adapter, &res,
+                                                          RESOURCE_LIMITS,
                                                           vf + 1);
                        if (!status) {
                                cap_flags = res.if_cap_flags;
@@ -3585,7 +3742,8 @@ static void BEx_get_resources(struct be_adapter *adapter,
                /* On a SuperNIC profile, the driver needs to use the
                 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
                 */
-               be_cmd_get_profile_config(adapter, &super_nic_res, 0);
+               be_cmd_get_profile_config(adapter, &super_nic_res,
+                                         RESOURCE_LIMITS, 0);
                /* Some old versions of BE3 FW don't report max_tx_qs value */
                res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
        } else {
@@ -3605,6 +3763,7 @@ static void BEx_get_resources(struct be_adapter *adapter,
                res->max_evt_qs = 1;
 
        res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
+       res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
        if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
                res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
 }
@@ -3624,13 +3783,12 @@ static void be_setup_init(struct be_adapter *adapter)
 
 static int be_get_sriov_config(struct be_adapter *adapter)
 {
-       struct device *dev = &adapter->pdev->dev;
        struct be_resources res = {0};
        int max_vfs, old_vfs;
 
-       /* Some old versions of BE3 FW don't report max_vfs value */
-       be_cmd_get_profile_config(adapter, &res, 0);
+       be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
 
+       /* Some old versions of BE3 FW don't report max_vfs value */
        if (BE3_chip(adapter) && !res.max_vfs) {
                max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
                res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
@@ -3638,35 +3796,49 @@ static int be_get_sriov_config(struct be_adapter *adapter)
 
        adapter->pool_res = res;
 
-       if (!be_max_vfs(adapter)) {
-               if (num_vfs)
-                       dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
-               adapter->num_vfs = 0;
-               return 0;
-       }
-
-       pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
-
-       /* validate num_vfs module param */
+       /* If during previous unload of the driver, the VFs were not disabled,
+        * then we cannot rely on the PF POOL limits for the TotalVFs value.
+        * Instead use the TotalVFs value stored in the pci-dev struct.
+        */
        old_vfs = pci_num_vf(adapter->pdev);
        if (old_vfs) {
-               dev_info(dev, "%d VFs are already enabled\n", old_vfs);
-               if (old_vfs != num_vfs)
-                       dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
+               dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
+                        old_vfs);
+
+               adapter->pool_res.max_vfs =
+                       pci_sriov_get_totalvfs(adapter->pdev);
                adapter->num_vfs = old_vfs;
-       } else {
-               if (num_vfs > be_max_vfs(adapter)) {
-                       dev_info(dev, "Resources unavailable to init %d VFs\n",
-                                num_vfs);
-                       dev_info(dev, "Limiting to %d VFs\n",
-                                be_max_vfs(adapter));
-               }
-               adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
        }
 
        return 0;
 }
 
+static void be_alloc_sriov_res(struct be_adapter *adapter)
+{
+       int old_vfs = pci_num_vf(adapter->pdev);
+       u16 num_vf_qs;
+       int status;
+
+       be_get_sriov_config(adapter);
+
+       if (!old_vfs)
+               pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
+
+       /* When the HW is in SRIOV capable configuration, the PF-pool
+        * resources are given to PF during driver load, if there are no
+        * old VFs. This facility is not available in BE3 FW.
+        * Also, this is done by FW in Lancer chip.
+        */
+       if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
+               num_vf_qs = be_calculate_vf_qs(adapter, 0);
+               status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
+                                                num_vf_qs);
+               if (status)
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to optimize SRIOV resources\n");
+       }
+}
+
 static int be_get_resources(struct be_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
@@ -3687,12 +3859,23 @@ static int be_get_resources(struct be_adapter *adapter)
                if (status)
                        return status;
 
+               /* If a default RXQ must be created, we'll use up one RSSQ */
+               if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
+                   !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
+                       res.max_rss_qs -= 1;
+
                /* If RoCE may be enabled stash away half the EQs for RoCE */
                if (be_roce_supported(adapter))
                        res.max_evt_qs /= 2;
                adapter->res = res;
        }
 
+       /* If FW supports RSS default queue, then skip creating non-RSS
+        * queue for non-IP traffic.
+        */
+       adapter->need_def_rxq = (be_if_cap_flags(adapter) &
+                                BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
+
        dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
                 be_max_txqs(adapter), be_max_rxqs(adapter),
                 be_max_rss(adapter), be_max_eqs(adapter),
@@ -3701,47 +3884,33 @@ static int be_get_resources(struct be_adapter *adapter)
                 be_max_uc(adapter), be_max_mc(adapter),
                 be_max_vlans(adapter));
 
+       /* Sanitize cfg_num_qs based on HW and platform limits */
+       adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
+                                   be_max_qs(adapter));
        return 0;
 }
 
-static void be_sriov_config(struct be_adapter *adapter)
-{
-       struct device *dev = &adapter->pdev->dev;
-       int status;
-
-       status = be_get_sriov_config(adapter);
-       if (status) {
-               dev_err(dev, "Failed to query SR-IOV configuration\n");
-               dev_err(dev, "SR-IOV cannot be enabled\n");
-               return;
-       }
-
-       /* When the HW is in SRIOV capable configuration, the PF-pool
-        * resources are equally distributed across the max-number of
-        * VFs. The user may request only a subset of the max-vfs to be
-        * enabled. Based on num_vfs, redistribute the resources across
-        * num_vfs so that each VF will have access to more number of
-        * resources. This facility is not available in BE3 FW.
-        * Also, this is done by FW in Lancer chip.
-        */
-       if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
-               status = be_cmd_set_sriov_config(adapter,
-                                                adapter->pool_res,
-                                                adapter->num_vfs);
-               if (status)
-                       dev_err(dev, "Failed to optimize SR-IOV resources\n");
-       }
-}
-
 static int be_get_config(struct be_adapter *adapter)
 {
+       int status, level;
        u16 profile_id;
-       int status;
+
+       status = be_cmd_get_cntl_attributes(adapter);
+       if (status)
+               return status;
 
        status = be_cmd_query_fw_cfg(adapter);
        if (status)
                return status;
 
+       if (BEx_chip(adapter)) {
+               level = be_cmd_get_fw_log_level(adapter);
+               adapter->msg_enable =
+                       level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+       }
+
+       be_cmd_get_acpi_wol_cap(adapter);
+
        be_cmd_query_port_name(adapter);
 
        if (be_physfn(adapter)) {
@@ -3751,9 +3920,6 @@ static int be_get_config(struct be_adapter *adapter)
                                 "Using profile 0x%x\n", profile_id);
        }
 
-       if (!BE2_chip(adapter) && be_physfn(adapter))
-               be_sriov_config(adapter);
-
        status = be_get_resources(adapter);
        if (status)
                return status;
@@ -3763,9 +3929,6 @@ static int be_get_config(struct be_adapter *adapter)
        if (!adapter->pmac_id)
                return -ENOMEM;
 
-       /* Sanitize cfg_num_qs based on HW and platform limits */
-       adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
-
        return 0;
 }
 
@@ -3799,6 +3962,13 @@ static void be_schedule_worker(struct be_adapter *adapter)
        adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
 }
 
+static void be_schedule_err_detection(struct be_adapter *adapter)
+{
+       schedule_delayed_work(&adapter->be_err_detection_work,
+                             msecs_to_jiffies(1000));
+       adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
+}
+
 static int be_setup_queues(struct be_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
@@ -3881,16 +4051,61 @@ static inline int fw_major_num(const char *fw_ver)
        return fw_major;
 }
 
+/* If any VFs are already enabled don't FLR the PF */
+static bool be_reset_required(struct be_adapter *adapter)
+{
+       return pci_num_vf(adapter->pdev) ? false : true;
+}
+
+/* Wait for the FW to be ready and perform the required initialization */
+static int be_func_init(struct be_adapter *adapter)
+{
+       int status;
+
+       status = be_fw_wait_ready(adapter);
+       if (status)
+               return status;
+
+       if (be_reset_required(adapter)) {
+               status = be_cmd_reset_function(adapter);
+               if (status)
+                       return status;
+
+               /* Wait for interrupts to quiesce after an FLR */
+               msleep(100);
+
+               /* We can clear all errors when function reset succeeds */
+               be_clear_all_error(adapter);
+       }
+
+       /* Tell FW we're ready to fire cmds */
+       status = be_cmd_fw_init(adapter);
+       if (status)
+               return status;
+
+       /* Allow interrupts for other ULPs running on NIC function */
+       be_intr_set(adapter, true);
+
+       return 0;
+}
+
 static int be_setup(struct be_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
        int status;
 
+       status = be_func_init(adapter);
+       if (status)
+               return status;
+
        be_setup_init(adapter);
 
        if (!lancer_chip(adapter))
                be_cmd_req_native_mode(adapter);
 
+       if (!BE2_chip(adapter) && be_physfn(adapter))
+               be_alloc_sriov_res(adapter);
+
        status = be_get_config(adapter);
        if (status)
                goto err;
@@ -3931,8 +4146,6 @@ static int be_setup(struct be_adapter *adapter)
 
        be_set_rx_mode(adapter->netdev);
 
-       be_cmd_get_acpi_wol_cap(adapter);
-
        status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                         adapter->rx_fc);
        if (status)
@@ -4842,29 +5055,165 @@ static void be_netdev_init(struct net_device *netdev)
        netdev->ethtool_ops = &be_ethtool_ops;
 }
 
-static void be_unmap_pci_bars(struct be_adapter *adapter)
+static void be_cleanup(struct be_adapter *adapter)
 {
-       if (adapter->csr)
-               pci_iounmap(adapter->pdev, adapter->csr);
-       if (adapter->db)
-               pci_iounmap(adapter->pdev, adapter->db);
-}
+       struct net_device *netdev = adapter->netdev;
 
-static int db_bar(struct be_adapter *adapter)
-{
-       if (lancer_chip(adapter) || !be_physfn(adapter))
-               return 0;
-       else
-               return 4;
+       rtnl_lock();
+       netif_device_detach(netdev);
+       if (netif_running(netdev))
+               be_close(netdev);
+       rtnl_unlock();
+
+       be_clear(adapter);
 }
 
-static int be_roce_map_pci_bars(struct be_adapter *adapter)
+static int be_resume(struct be_adapter *adapter)
 {
-       if (skyhawk_chip(adapter)) {
-               adapter->roce_db.size = 4096;
-               adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
-                                                             db_bar(adapter));
-               adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
+       struct net_device *netdev = adapter->netdev;
+       int status;
+
+       status = be_setup(adapter);
+       if (status)
+               return status;
+
+       if (netif_running(netdev)) {
+               status = be_open(netdev);
+               if (status)
+                       return status;
+       }
+
+       netif_device_attach(netdev);
+
+       return 0;
+}
+
+static int be_err_recover(struct be_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       int status;
+
+       status = be_resume(adapter);
+       if (status)
+               goto err;
+
+       dev_info(dev, "Adapter recovery successful\n");
+       return 0;
+err:
+       if (be_physfn(adapter))
+               dev_err(dev, "Adapter recovery failed\n");
+       else
+               dev_err(dev, "Re-trying adapter recovery\n");
+
+       return status;
+}
+
+static void be_err_detection_task(struct work_struct *work)
+{
+       struct be_adapter *adapter =
+                               container_of(work, struct be_adapter,
+                                            be_err_detection_work.work);
+       int status = 0;
+
+       be_detect_error(adapter);
+
+       if (adapter->hw_error) {
+               be_cleanup(adapter);
+
+               /* As of now error recovery support is in Lancer only */
+               if (lancer_chip(adapter))
+                       status = be_err_recover(adapter);
+       }
+
+       /* Always attempt recovery on VFs */
+       if (!status || be_virtfn(adapter))
+               be_schedule_err_detection(adapter);
+}
+
+static void be_log_sfp_info(struct be_adapter *adapter)
+{
+       int status;
+
+       status = be_cmd_query_sfp_info(adapter);
+       if (!status) {
+               dev_err(&adapter->pdev->dev,
+                       "Unqualified SFP+ detected on %c from %s part no: %s",
+                       adapter->port_name, adapter->phy.vendor_name,
+                       adapter->phy.vendor_pn);
+       }
+       adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
+}
+
+static void be_worker(struct work_struct *work)
+{
+       struct be_adapter *adapter =
+               container_of(work, struct be_adapter, work.work);
+       struct be_rx_obj *rxo;
+       int i;
+
+       /* when interrupts are not yet enabled, just reap any pending
+        * mcc completions
+        */
+       if (!netif_running(adapter->netdev)) {
+               local_bh_disable();
+               be_process_mcc(adapter);
+               local_bh_enable();
+               goto reschedule;
+       }
+
+       if (!adapter->stats_cmd_sent) {
+               if (lancer_chip(adapter))
+                       lancer_cmd_get_pport_stats(adapter,
+                                                  &adapter->stats_cmd);
+               else
+                       be_cmd_get_stats(adapter, &adapter->stats_cmd);
+       }
+
+       if (be_physfn(adapter) &&
+           MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+               be_cmd_get_die_temperature(adapter);
+
+       for_all_rx_queues(adapter, rxo, i) {
+               /* Replenish RX-queues starved due to memory
+                * allocation failures.
+                */
+               if (rxo->rx_post_starved)
+                       be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
+       }
+
+       be_eqd_update(adapter);
+
+       if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
+               be_log_sfp_info(adapter);
+
+reschedule:
+       adapter->work_counter++;
+       schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+}
+
+static void be_unmap_pci_bars(struct be_adapter *adapter)
+{
+       if (adapter->csr)
+               pci_iounmap(adapter->pdev, adapter->csr);
+       if (adapter->db)
+               pci_iounmap(adapter->pdev, adapter->db);
+}
+
+static int db_bar(struct be_adapter *adapter)
+{
+       if (lancer_chip(adapter) || !be_physfn(adapter))
+               return 0;
+       else
+               return 4;
+}
+
+static int be_roce_map_pci_bars(struct be_adapter *adapter)
+{
+       if (skyhawk_chip(adapter)) {
+               adapter->roce_db.size = 4096;
+               adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
+                                                             db_bar(adapter));
+               adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
                                                               db_bar(adapter));
        }
        return 0;
@@ -4874,6 +5223,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
 {
        struct pci_dev *pdev = adapter->pdev;
        u8 __iomem *addr;
+       u32 sli_intf;
+
+       pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+       adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
+                               SLI_INTF_FAMILY_SHIFT;
+       adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
 
        if (BEx_chip(adapter) && be_physfn(adapter)) {
                adapter->csr = pci_iomap(pdev, 2, 0);
@@ -4907,109 +5262,93 @@ pci_map_err:
        return -ENOMEM;
 }
 
-static void be_ctrl_cleanup(struct be_adapter *adapter)
+static void be_drv_cleanup(struct be_adapter *adapter)
 {
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
-
-       be_unmap_pci_bars(adapter);
+       struct device *dev = &adapter->pdev->dev;
 
        if (mem->va)
-               dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
-                                 mem->dma);
+               dma_free_coherent(dev, mem->size, mem->va, mem->dma);
 
        mem = &adapter->rx_filter;
        if (mem->va)
-               dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
-                                 mem->dma);
+               dma_free_coherent(dev, mem->size, mem->va, mem->dma);
+
+       mem = &adapter->stats_cmd;
+       if (mem->va)
+               dma_free_coherent(dev, mem->size, mem->va, mem->dma);
 }
 
-static int be_ctrl_init(struct be_adapter *adapter)
+/* Allocate and initialize various fields in be_adapter struct */
+static int be_drv_init(struct be_adapter *adapter)
 {
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *rx_filter = &adapter->rx_filter;
-       u32 sli_intf;
-       int status;
-
-       pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
-       adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
-                                SLI_INTF_FAMILY_SHIFT;
-       adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
-
-       status = be_map_pci_bars(adapter);
-       if (status)
-               goto done;
+       struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
+       struct device *dev = &adapter->pdev->dev;
+       int status = 0;
 
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-       mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
-                                               mbox_mem_alloc->size,
+       mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
                                                &mbox_mem_alloc->dma,
                                                GFP_KERNEL);
-       if (!mbox_mem_alloc->va) {
-               status = -ENOMEM;
-               goto unmap_pci_bars;
-       }
+       if (!mbox_mem_alloc->va)
+               return -ENOMEM;
+
        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
 
        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
-       rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
-                                           rx_filter->size, &rx_filter->dma,
-                                           GFP_KERNEL);
+       rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
+                                           &rx_filter->dma, GFP_KERNEL);
        if (!rx_filter->va) {
                status = -ENOMEM;
                goto free_mbox;
        }
 
+       if (lancer_chip(adapter))
+               stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
+       else if (BE2_chip(adapter))
+               stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
+       else if (BE3_chip(adapter))
+               stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+       else
+               stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
+       stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
+                                           &stats_cmd->dma, GFP_KERNEL);
+       if (!stats_cmd->va) {
+               status = -ENOMEM;
+               goto free_rx_filter;
+       }
+
        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);
-
        init_completion(&adapter->et_cmd_compl);
-       pci_save_state(adapter->pdev);
-       return 0;
-
-free_mbox:
-       dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
-                         mbox_mem_alloc->va, mbox_mem_alloc->dma);
-
-unmap_pci_bars:
-       be_unmap_pci_bars(adapter);
-
-done:
-       return status;
-}
 
-static void be_stats_cleanup(struct be_adapter *adapter)
-{
-       struct be_dma_mem *cmd = &adapter->stats_cmd;
+       pci_save_state(adapter->pdev);
 
-       if (cmd->va)
-               dma_free_coherent(&adapter->pdev->dev, cmd->size,
-                                 cmd->va, cmd->dma);
-}
+       INIT_DELAYED_WORK(&adapter->work, be_worker);
+       INIT_DELAYED_WORK(&adapter->be_err_detection_work,
+                         be_err_detection_task);
 
-static int be_stats_init(struct be_adapter *adapter)
-{
-       struct be_dma_mem *cmd = &adapter->stats_cmd;
+       adapter->rx_fc = true;
+       adapter->tx_fc = true;
 
-       if (lancer_chip(adapter))
-               cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
-       else if (BE2_chip(adapter))
-               cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
-       else if (BE3_chip(adapter))
-               cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
-       else
-               /* ALL non-BE ASICs */
-               cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
+       /* Must be a power of 2 or else MODULO will BUG_ON */
+       adapter->be_get_temp_freq = 64;
 
-       cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
-                                     GFP_KERNEL);
-       if (!cmd->va)
-               return -ENOMEM;
        return 0;
+
+free_rx_filter:
+       dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
+free_mbox:
+       dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
+                         mbox_mem_alloc->dma);
+       return status;
 }
 
 static void be_remove(struct pci_dev *pdev)
@@ -5022,7 +5361,7 @@ static void be_remove(struct pci_dev *pdev)
        be_roce_dev_remove(adapter);
        be_intr_set(adapter, false);
 
-       cancel_delayed_work_sync(&adapter->func_recovery_work);
+       be_cancel_err_detection(adapter);
 
        unregister_netdev(adapter->netdev);
 
@@ -5031,9 +5370,8 @@ static void be_remove(struct pci_dev *pdev)
        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);
 
-       be_stats_cleanup(adapter);
-
-       be_ctrl_cleanup(adapter);
+       be_unmap_pci_bars(adapter);
+       be_drv_cleanup(adapter);
 
        pci_disable_pcie_error_reporting(pdev);
 
@@ -5043,156 +5381,6 @@ static void be_remove(struct pci_dev *pdev)
        free_netdev(adapter->netdev);
 }
 
-static int be_get_initial_config(struct be_adapter *adapter)
-{
-       int status, level;
-
-       status = be_cmd_get_cntl_attributes(adapter);
-       if (status)
-               return status;
-
-       /* Must be a power of 2 or else MODULO will BUG_ON */
-       adapter->be_get_temp_freq = 64;
-
-       if (BEx_chip(adapter)) {
-               level = be_cmd_get_fw_log_level(adapter);
-               adapter->msg_enable =
-                       level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
-       }
-
-       adapter->cfg_num_qs = netif_get_num_default_rss_queues();
-       return 0;
-}
-
-static int lancer_recover_func(struct be_adapter *adapter)
-{
-       struct device *dev = &adapter->pdev->dev;
-       int status;
-
-       status = lancer_test_and_set_rdy_state(adapter);
-       if (status)
-               goto err;
-
-       if (netif_running(adapter->netdev))
-               be_close(adapter->netdev);
-
-       be_clear(adapter);
-
-       be_clear_all_error(adapter);
-
-       status = be_setup(adapter);
-       if (status)
-               goto err;
-
-       if (netif_running(adapter->netdev)) {
-               status = be_open(adapter->netdev);
-               if (status)
-                       goto err;
-       }
-
-       dev_err(dev, "Adapter recovery successful\n");
-       return 0;
-err:
-       if (status == -EAGAIN)
-               dev_err(dev, "Waiting for resource provisioning\n");
-       else
-               dev_err(dev, "Adapter recovery failed\n");
-
-       return status;
-}
-
-static void be_func_recovery_task(struct work_struct *work)
-{
-       struct be_adapter *adapter =
-               container_of(work, struct be_adapter,  func_recovery_work.work);
-       int status = 0;
-
-       be_detect_error(adapter);
-
-       if (adapter->hw_error && lancer_chip(adapter)) {
-               rtnl_lock();
-               netif_device_detach(adapter->netdev);
-               rtnl_unlock();
-
-               status = lancer_recover_func(adapter);
-               if (!status)
-                       netif_device_attach(adapter->netdev);
-       }
-
-       /* In Lancer, for all errors other than provisioning error (-EAGAIN),
-        * no need to attempt further recovery.
-        */
-       if (!status || status == -EAGAIN)
-               schedule_delayed_work(&adapter->func_recovery_work,
-                                     msecs_to_jiffies(1000));
-}
-
-static void be_log_sfp_info(struct be_adapter *adapter)
-{
-       int status;
-
-       status = be_cmd_query_sfp_info(adapter);
-       if (!status) {
-               dev_err(&adapter->pdev->dev,
-                       "Unqualified SFP+ detected on %c from %s part no: %s",
-                       adapter->port_name, adapter->phy.vendor_name,
-                       adapter->phy.vendor_pn);
-       }
-       adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
-}
-
-static void be_worker(struct work_struct *work)
-{
-       struct be_adapter *adapter =
-               container_of(work, struct be_adapter, work.work);
-       struct be_rx_obj *rxo;
-       int i;
-
-       /* when interrupts are not yet enabled, just reap any pending
-       * mcc completions */
-       if (!netif_running(adapter->netdev)) {
-               local_bh_disable();
-               be_process_mcc(adapter);
-               local_bh_enable();
-               goto reschedule;
-       }
-
-       if (!adapter->stats_cmd_sent) {
-               if (lancer_chip(adapter))
-                       lancer_cmd_get_pport_stats(adapter,
-                                                  &adapter->stats_cmd);
-               else
-                       be_cmd_get_stats(adapter, &adapter->stats_cmd);
-       }
-
-       if (be_physfn(adapter) &&
-           MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
-               be_cmd_get_die_temperature(adapter);
-
-       for_all_rx_queues(adapter, rxo, i) {
-               /* Replenish RX-queues starved due to memory
-                * allocation failures.
-                */
-               if (rxo->rx_post_starved)
-                       be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
-       }
-
-       be_eqd_update(adapter);
-
-       if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
-               be_log_sfp_info(adapter);
-
-reschedule:
-       adapter->work_counter++;
-       schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
-}
-
-/* If any VFs are already enabled don't FLR the PF */
-static bool be_reset_required(struct be_adapter *adapter)
-{
-       return pci_num_vf(adapter->pdev) ? false : true;
-}
-
 static char *mc_name(struct be_adapter *adapter)
 {
        char *str = ""; /* default */
@@ -5291,50 +5479,17 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
        if (!status)
                dev_info(&pdev->dev, "PCIe error reporting enabled\n");
 
-       status = be_ctrl_init(adapter);
+       status = be_map_pci_bars(adapter);
        if (status)
                goto free_netdev;
 
-       /* sync up with fw's ready state */
-       if (be_physfn(adapter)) {
-               status = be_fw_wait_ready(adapter);
-               if (status)
-                       goto ctrl_clean;
-       }
-
-       if (be_reset_required(adapter)) {
-               status = be_cmd_reset_function(adapter);
-               if (status)
-                       goto ctrl_clean;
-
-               /* Wait for interrupts to quiesce after an FLR */
-               msleep(100);
-       }
-
-       /* Allow interrupts for other ULPs running on NIC function */
-       be_intr_set(adapter, true);
-
-       /* tell fw we're ready to fire cmds */
-       status = be_cmd_fw_init(adapter);
-       if (status)
-               goto ctrl_clean;
-
-       status = be_stats_init(adapter);
-       if (status)
-               goto ctrl_clean;
-
-       status = be_get_initial_config(adapter);
+       status = be_drv_init(adapter);
        if (status)
-               goto stats_clean;
-
-       INIT_DELAYED_WORK(&adapter->work, be_worker);
-       INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
-       adapter->rx_fc = true;
-       adapter->tx_fc = true;
+               goto unmap_bars;
 
        status = be_setup(adapter);
        if (status)
-               goto stats_clean;
+               goto drv_cleanup;
 
        be_netdev_init(netdev);
        status = register_netdev(netdev);
@@ -5343,8 +5498,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 
        be_roce_dev_add(adapter);
 
-       schedule_delayed_work(&adapter->func_recovery_work,
-                             msecs_to_jiffies(1000));
+       be_schedule_err_detection(adapter);
 
        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), adapter->port_name);
@@ -5353,10 +5507,10 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 
 unsetup:
        be_clear(adapter);
-stats_clean:
-       be_stats_cleanup(adapter);
-ctrl_clean:
-       be_ctrl_cleanup(adapter);
+drv_cleanup:
+       be_drv_cleanup(adapter);
+unmap_bars:
+       be_unmap_pci_bars(adapter);
 free_netdev:
        free_netdev(netdev);
 rel_reg:
@@ -5371,21 +5525,14 @@ do_none:
 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct be_adapter *adapter = pci_get_drvdata(pdev);
-       struct net_device *netdev =  adapter->netdev;
 
        if (adapter->wol_en)
                be_setup_wol(adapter, true);
 
        be_intr_set(adapter, false);
-       cancel_delayed_work_sync(&adapter->func_recovery_work);
+       be_cancel_err_detection(adapter);
 
-       netif_device_detach(netdev);
-       if (netif_running(netdev)) {
-               rtnl_lock();
-               be_close(netdev);
-               rtnl_unlock();
-       }
-       be_clear(adapter);
+       be_cleanup(adapter);
 
        pci_save_state(pdev);
        pci_disable_device(pdev);
@@ -5393,13 +5540,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
        return 0;
 }
 
-static int be_resume(struct pci_dev *pdev)
+static int be_pci_resume(struct pci_dev *pdev)
 {
-       int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
-       struct net_device *netdev =  adapter->netdev;
-
-       netif_device_detach(netdev);
+       int status = 0;
 
        status = pci_enable_device(pdev);
        if (status)
@@ -5408,30 +5552,11 @@ static int be_resume(struct pci_dev *pdev)
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
 
-       status = be_fw_wait_ready(adapter);
-       if (status)
-               return status;
-
-       status = be_cmd_reset_function(adapter);
-       if (status)
-               return status;
-
-       be_intr_set(adapter, true);
-       /* tell fw we're ready to fire cmds */
-       status = be_cmd_fw_init(adapter);
+       status = be_resume(adapter);
        if (status)
                return status;
 
-       be_setup(adapter);
-       if (netif_running(netdev)) {
-               rtnl_lock();
-               be_open(netdev);
-               rtnl_unlock();
-       }
-
-       schedule_delayed_work(&adapter->func_recovery_work,
-                             msecs_to_jiffies(1000));
-       netif_device_attach(netdev);
+       be_schedule_err_detection(adapter);
 
        if (adapter->wol_en)
                be_setup_wol(adapter, false);
@@ -5451,7 +5576,7 @@ static void be_shutdown(struct pci_dev *pdev)
 
        be_roce_dev_shutdown(adapter);
        cancel_delayed_work_sync(&adapter->work);
-       cancel_delayed_work_sync(&adapter->func_recovery_work);
+       be_cancel_err_detection(adapter);
 
        netif_device_detach(adapter->netdev);
 
@@ -5464,22 +5589,15 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
 {
        struct be_adapter *adapter = pci_get_drvdata(pdev);
-       struct net_device *netdev =  adapter->netdev;
 
        dev_err(&adapter->pdev->dev, "EEH error detected\n");
 
        if (!adapter->eeh_error) {
                adapter->eeh_error = true;
 
-               cancel_delayed_work_sync(&adapter->func_recovery_work);
+               be_cancel_err_detection(adapter);
 
-               rtnl_lock();
-               netif_device_detach(netdev);
-               if (netif_running(netdev))
-                       be_close(netdev);
-               rtnl_unlock();
-
-               be_clear(adapter);
+               be_cleanup(adapter);
        }
 
        if (state == pci_channel_io_perm_failure)
@@ -5530,43 +5648,73 @@ static void be_eeh_resume(struct pci_dev *pdev)
 {
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
-       struct net_device *netdev =  adapter->netdev;
 
        dev_info(&adapter->pdev->dev, "EEH resume\n");
 
        pci_save_state(pdev);
 
-       status = be_cmd_reset_function(adapter);
+       status = be_resume(adapter);
        if (status)
                goto err;
 
-       /* On some BE3 FW versions, after a HW reset,
-        * interrupts will remain disabled for each function.
-        * So, explicitly enable interrupts
+       be_schedule_err_detection(adapter);
+       return;
+err:
+       dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+}
+
+static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+       struct be_adapter *adapter = pci_get_drvdata(pdev);
+       u16 num_vf_qs;
+       int status;
+
+       if (!num_vfs)
+               be_vf_clear(adapter);
+
+       adapter->num_vfs = num_vfs;
+
+       if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
+               dev_warn(&pdev->dev,
+                        "Cannot disable VFs while they are assigned\n");
+               return -EBUSY;
+       }
+
+       /* When the HW is in SRIOV capable configuration, the PF-pool resources
+        * are equally distributed across the max-number of VFs. The user may
+        * request only a subset of the max-vfs to be enabled.
+        * Based on num_vfs, redistribute the resources across num_vfs so that
+        * each VF will have access to more number of resources.
+        * This facility is not available in BE3 FW.
+        * Also, this is done by FW in Lancer chip.
         */
-       be_intr_set(adapter, true);
+       if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
+               num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
+               status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
+                                                adapter->num_vfs, num_vf_qs);
+               if (status)
+                       dev_err(&pdev->dev,
+                               "Failed to optimize SR-IOV resources\n");
+       }
 
-       /* tell fw we're ready to fire cmds */
-       status = be_cmd_fw_init(adapter);
+       status = be_get_resources(adapter);
        if (status)
-               goto err;
+               return be_cmd_status(status);
 
-       status = be_setup(adapter);
+       /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
+       rtnl_lock();
+       status = be_update_queues(adapter);
+       rtnl_unlock();
        if (status)
-               goto err;
+               return be_cmd_status(status);
 
-       if (netif_running(netdev)) {
-               status = be_open(netdev);
-               if (status)
-                       goto err;
-       }
+       if (adapter->num_vfs)
+               status = be_vf_setup(adapter);
 
-       schedule_delayed_work(&adapter->func_recovery_work,
-                             msecs_to_jiffies(1000));
-       netif_device_attach(netdev);
-       return;
-err:
-       dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+       if (!status)
+               return adapter->num_vfs;
+
+       return 0;
 }
 
 static const struct pci_error_handlers be_eeh_handlers = {
@@ -5581,8 +5729,9 @@ static struct pci_driver be_driver = {
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
-       .resume = be_resume,
+       .resume = be_pci_resume,
        .shutdown = be_shutdown,
+       .sriov_configure = be_pci_sriov_configure,
        .err_handler = &be_eeh_handlers
 };
 
@@ -5596,6 +5745,11 @@ static int __init be_init_module(void)
                rx_frag_size = 2048;
        }
 
+       if (num_vfs > 0) {
+               pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
+               pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
+       }
+
        return pci_register_driver(&be_driver);
 }
 module_init(be_init_module);
index f88cfaa359e725f1707ff25552993a3f94f57e00..442410cd2ca4b11baaa40039085bbe937344eec5 100644 (file)
@@ -1299,7 +1299,7 @@ static int ethoc_resume(struct platform_device *pdev)
 # define ethoc_resume  NULL
 #endif
 
-static struct of_device_id ethoc_match[] = {
+static const struct of_device_id ethoc_match[] = {
        { .compatible = "opencores,ethoc", },
        {},
 };
index ba84c4a9ce32fd2d677b42e49531aab5a0433d97..25e3425729d0ad54acb0a86e0c1137b3f3152059 100644 (file)
@@ -58,14 +58,12 @@ source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
 
 config FSL_PQ_MDIO
        tristate "Freescale PQ MDIO"
-       depends on FSL_SOC
        select PHYLIB
        ---help---
          This driver supports the MDIO bus used by the gianfar and UCC drivers.
 
 config FSL_XGMAC_MDIO
        tristate "Freescale XGMAC MDIO"
-       depends on FSL_SOC
        select PHYLIB
        select OF_MDIO
        ---help---
index f495796248db97f3bf3e94900d00cacd2d520624..afe7f39cdd7cc2f5c7a7e1833090810b2400f2ba 100644 (file)
@@ -1057,7 +1057,7 @@ static int mpc52xx_fec_of_resume(struct platform_device *op)
 }
 #endif
 
-static struct of_device_id mpc52xx_fec_match[] = {
+static const struct of_device_id mpc52xx_fec_match[] = {
        { .compatible = "fsl,mpc5200b-fec", },
        { .compatible = "fsl,mpc5200-fec", },
        { .compatible = "mpc5200-fec", },
index e0528900db023cada5870329317996511e407e73..1e647beaf9894b5e498dde448f400f5901e8034f 100644 (file)
@@ -134,7 +134,7 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of)
        return 0;
 }
 
-static struct of_device_id mpc52xx_fec_mdio_match[] = {
+static const struct of_device_id mpc52xx_fec_mdio_match[] = {
        { .compatible = "fsl,mpc5200b-mdio", },
        { .compatible = "fsl,mpc5200-mdio", },
        { .compatible = "mpc5200b-fec-phy", },
index 1f9cf2345266b2f24a247768518773548d774f95..a583d89b13c457d84a3158ab02adceb349fa7978 100644 (file)
@@ -136,7 +136,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
                 */
                writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));
 
-               /* It is recommended to doulbe check the TMODE field in the
+               /* It is recommended to double check the TMODE field in the
                 * TCSR register to be cleared before the first compare counter
                 * is written into TCCR register. Just add a double check.
                 */
@@ -390,20 +390,18 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  * read the timecounter and return the correct value on ns,
  * after converting it into a struct timespec.
  */
-static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        struct fec_enet_private *adapter =
            container_of(ptp, struct fec_enet_private, ptp_caps);
        u64 ns;
-       u32 remainder;
        unsigned long flags;
 
        spin_lock_irqsave(&adapter->tmreg_lock, flags);
        ns = timecounter_read(&adapter->tc);
        spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
 
        return 0;
 }
@@ -417,7 +415,7 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
  * wall timer value.
  */
 static int fec_ptp_settime(struct ptp_clock_info *ptp,
-                          const struct timespec *ts)
+                          const struct timespec64 *ts)
 {
        struct fec_enet_private *fep =
            container_of(ptp, struct fec_enet_private, ptp_caps);
@@ -433,8 +431,7 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
                return -EINVAL;
        }
 
-       ns = ts->tv_sec * 1000000000ULL;
-       ns += ts->tv_nsec;
+       ns = timespec64_to_ns(ts);
        /* Get the timer value based on timestamp.
         * Update the counter with the masked value.
         */
@@ -584,8 +581,8 @@ void fec_ptp_init(struct platform_device *pdev)
        fep->ptp_caps.pps = 1;
        fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
        fep->ptp_caps.adjtime = fec_ptp_adjtime;
-       fep->ptp_caps.gettime = fec_ptp_gettime;
-       fep->ptp_caps.settime = fec_ptp_settime;
+       fep->ptp_caps.gettime64 = fec_ptp_gettime;
+       fep->ptp_caps.settime64 = fec_ptp_settime;
        fep->ptp_caps.enable = fec_ptp_enable;
 
        fep->cycle_speed = clk_get_rate(fep->clk_ptp);
index a17628769a1f0de4c749ac59138d300b8e02e07f..9b3639eae676a5ecb3eb042b33d624ef061320b7 100644 (file)
@@ -916,7 +916,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
 #endif
 };
 
-static struct of_device_id fs_enet_match[];
+static const struct of_device_id fs_enet_match[];
 static int fs_enet_probe(struct platform_device *ofdev)
 {
        const struct of_device_id *match;
@@ -1082,7 +1082,7 @@ static int fs_enet_remove(struct platform_device *ofdev)
        return 0;
 }
 
-static struct of_device_id fs_enet_match[] = {
+static const struct of_device_id fs_enet_match[] = {
 #ifdef CONFIG_FS_ENET_HAS_SCC
        {
                .compatible = "fsl,cpm1-scc-enet",
index 1d5617d2d8bda35d093c3dfe532ddf11aea4b77d..68a428de0bc0ef02c2dca32a673b748b6375ca12 100644 (file)
@@ -213,7 +213,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
        return 0;
 }
 
-static struct of_device_id fs_enet_mdio_bb_match[] = {
+static const struct of_device_id fs_enet_mdio_bb_match[] = {
        {
                .compatible = "fsl,cpm2-mdio-bitbang",
        },
index 1648e35825003020b64986c806f956251e7a37ad..2be383e6d258536b21b3528d2b40912644daed69 100644 (file)
@@ -95,7 +95,7 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location,
 
 }
 
-static struct of_device_id fs_enet_mdio_fec_match[];
+static const struct of_device_id fs_enet_mdio_fec_match[];
 static int fs_enet_mdio_probe(struct platform_device *ofdev)
 {
        const struct of_device_id *match;
@@ -208,7 +208,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
        return 0;
 }
 
-static struct of_device_id fs_enet_mdio_fec_match[] = {
+static const struct of_device_id fs_enet_mdio_fec_match[] = {
        {
                .compatible = "fsl,pq1-fec-mdio",
        },
index d1a91e344e6b8e5e4ba344a0ad95674d5b2f48f5..3c40f6b9922436a32d255aa627e9d94db45f8477 100644 (file)
@@ -294,7 +294,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
 
 #endif
 
-static struct of_device_id fsl_pq_mdio_match[] = {
+static const struct of_device_id fsl_pq_mdio_match[] = {
 #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
        {
                .compatible = "fsl,gianfar-tbi",
index 7bf3682cdf478b1597cf04e071b525eed31adb69..4ee080d49bc000a6bdb0d4a3184d6421aaae2ffc 100644 (file)
@@ -158,7 +158,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 {
        u32 lstatus;
 
-       bdp->bufPtr = buf;
+       bdp->bufPtr = cpu_to_be32(buf);
 
        lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
        if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
@@ -166,7 +166,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 
        gfar_wmb();
 
-       bdp->lstatus = lstatus;
+       bdp->lstatus = cpu_to_be32(lstatus);
 }
 
 static int gfar_init_bds(struct net_device *ndev)
@@ -200,7 +200,8 @@ static int gfar_init_bds(struct net_device *ndev)
 
                /* Set the last descriptor in the ring to indicate wrap */
                txbdp--;
-               txbdp->status |= TXBD_WRAP;
+               txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
+                                           TXBD_WRAP);
        }
 
        rfbptr = &regs->rfbptr0;
@@ -214,7 +215,7 @@ static int gfar_init_bds(struct net_device *ndev)
                        struct sk_buff *skb = rx_queue->rx_skbuff[j];
 
                        if (skb) {
-                               bufaddr = rxbdp->bufPtr;
+                               bufaddr = be32_to_cpu(rxbdp->bufPtr);
                        } else {
                                skb = gfar_new_skb(ndev, &bufaddr);
                                if (!skb) {
@@ -696,19 +697,28 @@ static int gfar_parse_group(struct device_node *np,
        grp->priv = priv;
        spin_lock_init(&grp->grplock);
        if (priv->mode == MQ_MG_MODE) {
-               u32 *rxq_mask, *txq_mask;
-               rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
-               txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+               u32 rxq_mask, txq_mask;
+               int ret;
+
+               grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+               grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+
+               ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
+               if (!ret) {
+                       grp->rx_bit_map = rxq_mask ?
+                       rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+               }
+
+               ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
+               if (!ret) {
+                       grp->tx_bit_map = txq_mask ?
+                       txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+               }
 
                if (priv->poll_mode == GFAR_SQ_POLLING) {
                        /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
                        grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
                        grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
-               } else { /* GFAR_MQ_POLLING */
-                       grp->rx_bit_map = rxq_mask ?
-                       *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
-                       grp->tx_bit_map = txq_mask ?
-                       *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
                }
        } else {
                grp->rx_bit_map = 0xFF;
@@ -769,11 +779,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
        struct gfar_private *priv = NULL;
        struct device_node *np = ofdev->dev.of_node;
        struct device_node *child = NULL;
-       const u32 *stash;
-       const u32 *stash_len;
-       const u32 *stash_idx;
+       struct property *stash;
+       u32 stash_len = 0;
+       u32 stash_idx = 0;
        unsigned int num_tx_qs, num_rx_qs;
-       u32 *tx_queues, *rx_queues;
        unsigned short mode, poll_mode;
 
        if (!np)
@@ -787,10 +796,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
                poll_mode = GFAR_SQ_POLLING;
        }
 
-       /* parse the num of HW tx and rx queues */
-       tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
-       rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
-
        if (mode == SQ_SG_MODE) {
                num_tx_qs = 1;
                num_rx_qs = 1;
@@ -809,8 +814,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
                        num_tx_qs = num_grps; /* one txq per int group */
                        num_rx_qs = num_grps; /* one rxq per int group */
                } else { /* GFAR_MQ_POLLING */
-                       num_tx_qs = tx_queues ? *tx_queues : 1;
-                       num_rx_qs = rx_queues ? *rx_queues : 1;
+                       u32 tx_queues, rx_queues;
+                       int ret;
+
+                       /* parse the num of HW tx and rx queues */
+                       ret = of_property_read_u32(np, "fsl,num_tx_queues",
+                                                  &tx_queues);
+                       num_tx_qs = ret ? 1 : tx_queues;
+
+                       ret = of_property_read_u32(np, "fsl,num_rx_queues",
+                                                  &rx_queues);
+                       num_rx_qs = ret ? 1 : rx_queues;
                }
        }
 
@@ -851,13 +865,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
        if (err)
                goto rx_alloc_failed;
 
+       err = of_property_read_string(np, "model", &model);
+       if (err) {
+               pr_err("Device model property missing, aborting\n");
+               goto rx_alloc_failed;
+       }
+
        /* Init Rx queue filer rule set linked list */
        INIT_LIST_HEAD(&priv->rx_list.list);
        priv->rx_list.count = 0;
        mutex_init(&priv->rx_queue_access);
 
-       model = of_get_property(np, "model", NULL);
-
        for (i = 0; i < MAXGROUPS; i++)
                priv->gfargrp[i].regs = NULL;
 
@@ -877,22 +895,22 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
                        goto err_grp_init;
        }
 
-       stash = of_get_property(np, "bd-stash", NULL);
+       stash = of_find_property(np, "bd-stash", NULL);
 
        if (stash) {
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
                priv->bd_stash_en = 1;
        }
 
-       stash_len = of_get_property(np, "rx-stash-len", NULL);
+       err = of_property_read_u32(np, "rx-stash-len", &stash_len);
 
-       if (stash_len)
-               priv->rx_stash_size = *stash_len;
+       if (err == 0)
+               priv->rx_stash_size = stash_len;
 
-       stash_idx = of_get_property(np, "rx-stash-idx", NULL);
+       err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
 
-       if (stash_idx)
-               priv->rx_stash_index = *stash_idx;
+       if (err == 0)
+               priv->rx_stash_index = stash_idx;
 
        if (stash_len || stash_idx)
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
@@ -919,15 +937,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
                                     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
                                     FSL_GIANFAR_DEV_HAS_TIMER;
 
-       ctype = of_get_property(np, "phy-connection-type", NULL);
+       err = of_property_read_string(np, "phy-connection-type", &ctype);
 
        /* We only care about rgmii-id.  The rest are autodetected */
-       if (ctype && !strcmp(ctype, "rgmii-id"))
+       if (err == 0 && !strcmp(ctype, "rgmii-id"))
                priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
        else
                priv->interface = PHY_INTERFACE_MODE_MII;
 
-       if (of_get_property(np, "fsl,magic-packet", NULL))
+       if (of_find_property(np, "fsl,magic-packet", NULL))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
 
        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -1884,14 +1902,15 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
                if (!tx_queue->tx_skbuff[i])
                        continue;
 
-               dma_unmap_single(priv->dev, txbdp->bufPtr,
-                                txbdp->length, DMA_TO_DEVICE);
+               dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
+                                be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
                txbdp->lstatus = 0;
                for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
                     j++) {
                        txbdp++;
-                       dma_unmap_page(priv->dev, txbdp->bufPtr,
-                                      txbdp->length, DMA_TO_DEVICE);
+                       dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
+                                      be16_to_cpu(txbdp->length),
+                                      DMA_TO_DEVICE);
                }
                txbdp++;
                dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@ -1911,7 +1930,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 
        for (i = 0; i < rx_queue->rx_ring_size; i++) {
                if (rx_queue->rx_skbuff[i]) {
-                       dma_unmap_single(priv->dev, rxbdp->bufPtr,
+                       dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
                                         priv->rx_buffer_size,
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
@@ -2167,16 +2186,16 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
         */
        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
                flags |= TXFCB_UDP;
-               fcb->phcs = udp_hdr(skb)->check;
+               fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
        } else
-               fcb->phcs = tcp_hdr(skb)->check;
+               fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
 
        /* l3os is the distance between the start of the
         * frame (skb->data) and the start of the IP hdr.
         * l4os is the distance between the start of the
         * l3 hdr and the l4 hdr
         */
-       fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
+       fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
        fcb->l4os = skb_network_header_len(skb);
 
        fcb->flags = flags;
@@ -2185,7 +2204,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
 void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
 {
        fcb->flags |= TXFCB_VLN;
-       fcb->vlctl = skb_vlan_tag_get(skb);
+       fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
 }
 
 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
@@ -2298,7 +2317,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_queue->stats.tx_packets++;
 
        txbdp = txbdp_start = tx_queue->cur_tx;
-       lstatus = txbdp->lstatus;
+       lstatus = be32_to_cpu(txbdp->lstatus);
 
        /* Time stamp insertion requires one additional TxBD */
        if (unlikely(do_tstamp))
@@ -2306,11 +2325,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                                 tx_queue->tx_ring_size);
 
        if (nr_frags == 0) {
-               if (unlikely(do_tstamp))
-                       txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
-                                                         TXBD_INTERRUPT);
-               else
+               if (unlikely(do_tstamp)) {
+                       u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+                       lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+                       txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
+               } else {
                        lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+               }
        } else {
                /* Place the fragment addresses and lengths into the TxBDs */
                for (i = 0; i < nr_frags; i++) {
@@ -2320,7 +2342,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                        frag_len = skb_shinfo(skb)->frags[i].size;
 
-                       lstatus = txbdp->lstatus | frag_len |
+                       lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
                                  BD_LFLAG(TXBD_READY);
 
                        /* Handle the last BD specially */
@@ -2336,11 +2358,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                goto dma_map_err;
 
                        /* set the TxBD length and buffer pointer */
-                       txbdp->bufPtr = bufaddr;
-                       txbdp->lstatus = lstatus;
+                       txbdp->bufPtr = cpu_to_be32(bufaddr);
+                       txbdp->lstatus = cpu_to_be32(lstatus);
                }
 
-               lstatus = txbdp_start->lstatus;
+               lstatus = be32_to_cpu(txbdp_start->lstatus);
        }
 
        /* Add TxPAL between FCB and frame if required */
@@ -2388,7 +2410,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
                goto dma_map_err;
 
-       txbdp_start->bufPtr = bufaddr;
+       txbdp_start->bufPtr = cpu_to_be32(bufaddr);
 
        /* If time stamping is requested one additional TxBD must be set up. The
         * first TxBD points to the FCB and must have a data length of
@@ -2396,9 +2418,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * the full frame length.
         */
        if (unlikely(do_tstamp)) {
-               txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
-               txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
-                                        (skb_headlen(skb) - fcb_len);
+               u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+               bufaddr = be32_to_cpu(txbdp_start->bufPtr);
+               bufaddr += fcb_len;
+               lstatus_ts |= BD_LFLAG(TXBD_READY) |
+                             (skb_headlen(skb) - fcb_len);
+
+               txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
+               txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
        } else {
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2421,7 +2449,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        gfar_wmb();
 
-       txbdp_start->lstatus = lstatus;
+       txbdp_start->lstatus = cpu_to_be32(lstatus);
 
        gfar_wmb(); /* force lstatus write before tx_skbuff */
 
@@ -2460,13 +2488,14 @@ dma_map_err:
        if (do_tstamp)
                txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
        for (i = 0; i < nr_frags; i++) {
-               lstatus = txbdp->lstatus;
+               lstatus = be32_to_cpu(txbdp->lstatus);
                if (!(lstatus & BD_LFLAG(TXBD_READY)))
                        break;
 
-               txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
-               bufaddr = txbdp->bufPtr;
-               dma_unmap_page(priv->dev, bufaddr, txbdp->length,
+               lstatus &= ~BD_LFLAG(TXBD_READY);
+               txbdp->lstatus = cpu_to_be32(lstatus);
+               bufaddr = be32_to_cpu(txbdp->bufPtr);
+               dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
                               DMA_TO_DEVICE);
                txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
        }
@@ -2607,7 +2636,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
                lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
 
-               lstatus = lbdp->lstatus;
+               lstatus = be32_to_cpu(lbdp->lstatus);
 
                /* Only clean completed frames */
                if ((lstatus & BD_LFLAG(TXBD_READY)) &&
@@ -2616,11 +2645,12 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        next = next_txbd(bdp, base, tx_ring_size);
-                       buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+                       buflen = be16_to_cpu(next->length) +
+                                GMAC_FCB_LEN + GMAC_TXPAL_LEN;
                } else
-                       buflen = bdp->length;
+                       buflen = be16_to_cpu(bdp->length);
 
-               dma_unmap_single(priv->dev, bdp->bufPtr,
+               dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
                                 buflen, DMA_TO_DEVICE);
 
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2631,17 +2661,18 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
                        shhwtstamps.hwtstamp = ns_to_ktime(*ns);
                        skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
                        skb_tstamp_tx(skb, &shhwtstamps);
-                       bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+                       gfar_clear_txbd_status(bdp);
                        bdp = next;
                }
 
-               bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+               gfar_clear_txbd_status(bdp);
                bdp = next_txbd(bdp, base, tx_ring_size);
 
                for (i = 0; i < frags; i++) {
-                       dma_unmap_page(priv->dev, bdp->bufPtr,
-                                      bdp->length, DMA_TO_DEVICE);
-                       bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+                       dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
+                                      be16_to_cpu(bdp->length),
+                                      DMA_TO_DEVICE);
+                       gfar_clear_txbd_status(bdp);
                        bdp = next_txbd(bdp, base, tx_ring_size);
                }
 
@@ -2798,13 +2829,13 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
         * were verified, then we tell the kernel that no
         * checksumming is necessary.  Otherwise, it is [FIXME]
         */
-       if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
+       if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
+           (RXFCB_CIP | RXFCB_CTU))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);
 }
 
-
 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                               int amount_pull, struct napi_struct *napi)
@@ -2846,8 +2877,9 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
         * RXFCB_VLN is pseudo randomly set.
         */
        if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
-           fcb->flags & RXFCB_VLN)
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
+           be16_to_cpu(fcb->flags) & RXFCB_VLN)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      be16_to_cpu(fcb->vlctl));
 
        /* Send the packet up the stack */
        napi_gro_receive(napi, skb);
@@ -2874,7 +2906,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 
        amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
 
-       while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+       while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
                struct sk_buff *newskb;
                dma_addr_t bufaddr;
 
@@ -2885,21 +2917,22 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 
                skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
-               dma_unmap_single(priv->dev, bdp->bufPtr,
+               dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
                                 priv->rx_buffer_size, DMA_FROM_DEVICE);
 
-               if (unlikely(!(bdp->status & RXBD_ERR) &&
-                            bdp->length > priv->rx_buffer_size))
-                       bdp->status = RXBD_LARGE;
+               if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
+                            be16_to_cpu(bdp->length) > priv->rx_buffer_size))
+                       bdp->status = cpu_to_be16(RXBD_LARGE);
 
                /* We drop the frame if we failed to allocate a new buffer */
-               if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
-                            bdp->status & RXBD_ERR)) {
-                       count_errors(bdp->status, dev);
+               if (unlikely(!newskb ||
+                            !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
+                            be16_to_cpu(bdp->status) & RXBD_ERR)) {
+                       count_errors(be16_to_cpu(bdp->status), dev);
 
                        if (unlikely(!newskb)) {
                                newskb = skb;
-                               bufaddr = bdp->bufPtr;
+                               bufaddr = be32_to_cpu(bdp->bufPtr);
                        } else if (skb)
                                dev_kfree_skb(skb);
                } else {
@@ -2908,7 +2941,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                        howmany++;
 
                        if (likely(skb)) {
-                               pkt_len = bdp->length - ETH_FCS_LEN;
+                               pkt_len = be16_to_cpu(bdp->length) -
+                                         ETH_FCS_LEN;
                                /* Remove the FCS from the packet length */
                                skb_put(skb, pkt_len);
                                rx_queue->stats.rx_bytes += pkt_len;
@@ -3560,7 +3594,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
                phy_print_status(phydev);
 }
 
-static struct of_device_id gfar_match[] =
+static const struct of_device_id gfar_match[] =
 {
        {
                .type = "network",
index 9e1802400c23686825475e1f198be0864de5a19b..daa1d37de6427b93a756843074c5d2e5c1961467 100644 (file)
@@ -544,12 +544,12 @@ struct txbd8
 {
        union {
                struct {
-                       u16     status; /* Status Fields */
-                       u16     length; /* Buffer length */
+                       __be16  status; /* Status Fields */
+                       __be16  length; /* Buffer length */
                };
-               u32 lstatus;
+               __be32 lstatus;
        };
-       u32     bufPtr; /* Buffer Pointer */
+       __be32  bufPtr; /* Buffer Pointer */
 };
 
 struct txfcb {
@@ -557,28 +557,28 @@ struct txfcb {
        u8      ptp;    /* Flag to enable tx timestamping */
        u8      l4os;   /* Level 4 Header Offset */
        u8      l3os;   /* Level 3 Header Offset */
-       u16     phcs;   /* Pseudo-header Checksum */
-       u16     vlctl;  /* VLAN control word */
+       __be16  phcs;   /* Pseudo-header Checksum */
+       __be16  vlctl;  /* VLAN control word */
 };
 
 struct rxbd8
 {
        union {
                struct {
-                       u16     status; /* Status Fields */
-                       u16     length; /* Buffer Length */
+                       __be16  status; /* Status Fields */
+                       __be16  length; /* Buffer Length */
                };
-               u32 lstatus;
+               __be32 lstatus;
        };
-       u32     bufPtr; /* Buffer Pointer */
+       __be32  bufPtr; /* Buffer Pointer */
 };
 
 struct rxfcb {
-       u16     flags;
+       __be16  flags;
        u8      rq;     /* Receive Queue index */
        u8      pro;    /* Layer 4 Protocol */
        u16     reserved;
-       u16     vlctl;  /* VLAN control word */
+       __be16  vlctl;  /* VLAN control word */
 };
 
 struct gianfar_skb_cb {
@@ -1287,6 +1287,14 @@ static inline void gfar_wmb(void)
 #endif
 }
 
+static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
+{
+       u32 lstatus = be32_to_cpu(bdp->lstatus);
+
+       lstatus &= BD_LFLAG(TXBD_WRAP);
+       bdp->lstatus = cpu_to_be32(lstatus);
+}
+
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
index 16826341a4c9abaf48880385049e008a63d1ece3..8e3cd77aa347a4136d10e25a934ecc335b0b9301 100644 (file)
@@ -322,10 +322,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return 0;
 }
 
-static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ptp_gianfar_gettime(struct ptp_clock_info *ptp,
+                              struct timespec64 *ts)
 {
        u64 ns;
-       u32 remainder;
        unsigned long flags;
        struct etsects *etsects = container_of(ptp, struct etsects, caps);
 
@@ -335,20 +335,19 @@ static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 
        spin_unlock_irqrestore(&etsects->lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
+
        return 0;
 }
 
 static int ptp_gianfar_settime(struct ptp_clock_info *ptp,
-                              const struct timespec *ts)
+                              const struct timespec64 *ts)
 {
        u64 ns;
        unsigned long flags;
        struct etsects *etsects = container_of(ptp, struct etsects, caps);
 
-       ns = ts->tv_sec * 1000000000ULL;
-       ns += ts->tv_nsec;
+       ns = timespec64_to_ns(ts);
 
        spin_lock_irqsave(&etsects->lock, flags);
 
@@ -418,8 +417,8 @@ static struct ptp_clock_info ptp_gianfar_caps = {
        .pps            = 1,
        .adjfreq        = ptp_gianfar_adjfreq,
        .adjtime        = ptp_gianfar_adjtime,
-       .gettime        = ptp_gianfar_gettime,
-       .settime        = ptp_gianfar_settime,
+       .gettime64      = ptp_gianfar_gettime,
+       .settime64      = ptp_gianfar_settime,
        .enable         = ptp_gianfar_enable,
 };
 
@@ -440,7 +439,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
 {
        struct device_node *node = dev->dev.of_node;
        struct etsects *etsects;
-       struct timespec now;
+       struct timespec64 now;
        int err = -ENOMEM;
        u32 tmr_ctrl;
        unsigned long flags;
@@ -495,7 +494,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
                pr_err("ioremap ptp registers failed\n");
                goto no_ioremap;
        }
-       getnstimeofday(&now);
+       getnstimeofday64(&now);
        ptp_gianfar_settime(&etsects->caps, &now);
 
        tmr_ctrl =
@@ -554,7 +553,7 @@ static int gianfar_ptp_remove(struct platform_device *dev)
        return 0;
 }
 
-static struct of_device_id match_table[] = {
+static const struct of_device_id match_table[] = {
        { .compatible = "fsl,etsec-ptp" },
        {},
 };
index 56b774d3a13d4c3856ad01fff0ebe67c128e4f65..4dd40e057f40035ff6b8a1e236ab80c8ddc5497b 100644 (file)
@@ -3933,7 +3933,7 @@ static int ucc_geth_remove(struct platform_device* ofdev)
        return 0;
 }
 
-static struct of_device_id ucc_geth_match[] = {
+static const struct of_device_id ucc_geth_match[] = {
        {
                .type = "network",
                .compatible = "ucc_geth",
index 3a83bc2c613ce0e907831a264070970acfd8eccf..7b8fe866f60380f6ba23e31d915e472666e99be0 100644 (file)
@@ -46,17 +46,43 @@ struct tgec_mdio_controller {
 #define MDIO_DATA(x)           (x & 0xffff)
 #define MDIO_DATA_BSY          BIT(31)
 
+struct mdio_fsl_priv {
+       struct  tgec_mdio_controller __iomem *mdio_base;
+       bool    is_little_endian;
+};
+
+static u32 xgmac_read32(void __iomem *regs,
+                       bool is_little_endian)
+{
+       if (is_little_endian)
+               return ioread32(regs);
+       else
+               return ioread32be(regs);
+}
+
+static void xgmac_write32(u32 value,
+                         void __iomem *regs,
+                         bool is_little_endian)
+{
+       if (is_little_endian)
+               iowrite32(value, regs);
+       else
+               iowrite32be(value, regs);
+}
+
 /*
  * Wait until the MDIO bus is free
  */
 static int xgmac_wait_until_free(struct device *dev,
-                                struct tgec_mdio_controller __iomem *regs)
+                                struct tgec_mdio_controller __iomem *regs,
+                                bool is_little_endian)
 {
        unsigned int timeout;
 
        /* Wait till the bus is free */
        timeout = TIMEOUT;
-       while ((ioread32be(&regs->mdio_stat) & MDIO_STAT_BSY) && timeout) {
+       while ((xgmac_read32(&regs->mdio_stat, is_little_endian) &
+               MDIO_STAT_BSY) && timeout) {
                cpu_relax();
                timeout--;
        }
@@ -73,13 +99,15 @@ static int xgmac_wait_until_free(struct device *dev,
  * Wait till the MDIO read or write operation is complete
  */
 static int xgmac_wait_until_done(struct device *dev,
-                                struct tgec_mdio_controller __iomem *regs)
+                                struct tgec_mdio_controller __iomem *regs,
+                                bool is_little_endian)
 {
        unsigned int timeout;
 
        /* Wait till the MDIO write is complete */
        timeout = TIMEOUT;
-       while ((ioread32be(&regs->mdio_data) & MDIO_DATA_BSY) && timeout) {
+       while ((xgmac_read32(&regs->mdio_stat, is_little_endian) &
+               MDIO_STAT_BSY) && timeout) {
                cpu_relax();
                timeout--;
        }
@@ -99,12 +127,14 @@ static int xgmac_wait_until_done(struct device *dev,
  */
 static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
 {
-       struct tgec_mdio_controller __iomem *regs = bus->priv;
+       struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+       struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
        uint16_t dev_addr;
        u32 mdio_ctl, mdio_stat;
        int ret;
+       bool endian = priv->is_little_endian;
 
-       mdio_stat = ioread32be(&regs->mdio_stat);
+       mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
        if (regnum & MII_ADDR_C45) {
                /* Clause 45 (ie 10G) */
                dev_addr = (regnum >> 16) & 0x1f;
@@ -115,29 +145,29 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
                mdio_stat &= ~MDIO_STAT_ENC;
        }
 
-       iowrite32be(mdio_stat, &regs->mdio_stat);
+       xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
 
-       ret = xgmac_wait_until_free(&bus->dev, regs);
+       ret = xgmac_wait_until_free(&bus->dev, regs, endian);
        if (ret)
                return ret;
 
        /* Set the port and dev addr */
        mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
-       iowrite32be(mdio_ctl, &regs->mdio_ctl);
+       xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
 
        /* Set the register address */
        if (regnum & MII_ADDR_C45) {
-               iowrite32be(regnum & 0xffff, &regs->mdio_addr);
+               xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
 
-               ret = xgmac_wait_until_free(&bus->dev, regs);
+               ret = xgmac_wait_until_free(&bus->dev, regs, endian);
                if (ret)
                        return ret;
        }
 
        /* Write the value to the register */
-       iowrite32be(MDIO_DATA(value), &regs->mdio_data);
+       xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian);
 
-       ret = xgmac_wait_until_done(&bus->dev, regs);
+       ret = xgmac_wait_until_done(&bus->dev, regs, endian);
        if (ret)
                return ret;
 
@@ -151,14 +181,16 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
  */
 static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
 {
-       struct tgec_mdio_controller __iomem *regs = bus->priv;
+       struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+       struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
        uint16_t dev_addr;
        uint32_t mdio_stat;
        uint32_t mdio_ctl;
        uint16_t value;
        int ret;
+       bool endian = priv->is_little_endian;
 
-       mdio_stat = ioread32be(&regs->mdio_stat);
+       mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
        if (regnum & MII_ADDR_C45) {
                dev_addr = (regnum >> 16) & 0x1f;
                mdio_stat |= MDIO_STAT_ENC;
@@ -167,41 +199,41 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
                mdio_stat &= ~MDIO_STAT_ENC;
        }
 
-       iowrite32be(mdio_stat, &regs->mdio_stat);
+       xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
 
-       ret = xgmac_wait_until_free(&bus->dev, regs);
+       ret = xgmac_wait_until_free(&bus->dev, regs, endian);
        if (ret)
                return ret;
 
        /* Set the Port and Device Addrs */
        mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
-       iowrite32be(mdio_ctl, &regs->mdio_ctl);
+       xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
 
        /* Set the register address */
        if (regnum & MII_ADDR_C45) {
-               iowrite32be(regnum & 0xffff, &regs->mdio_addr);
+               xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
 
-               ret = xgmac_wait_until_free(&bus->dev, regs);
+               ret = xgmac_wait_until_free(&bus->dev, regs, endian);
                if (ret)
                        return ret;
        }
 
        /* Initiate the read */
-       iowrite32be(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl);
+       xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
 
-       ret = xgmac_wait_until_done(&bus->dev, regs);
+       ret = xgmac_wait_until_done(&bus->dev, regs, endian);
        if (ret)
                return ret;
 
        /* Return all Fs if nothing was there */
-       if (ioread32be(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+       if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
                dev_err(&bus->dev,
                        "Error while reading PHY%d reg at %d.%hhu\n",
                        phy_id, dev_addr, regnum);
                return 0xffff;
        }
 
-       value = ioread32be(&regs->mdio_data) & 0xffff;
+       value = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
        dev_dbg(&bus->dev, "read %04x\n", value);
 
        return value;
@@ -212,6 +244,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        struct mii_bus *bus;
        struct resource res;
+       struct mdio_fsl_priv *priv;
        int ret;
 
        ret = of_address_to_resource(np, 0, &res);
@@ -220,7 +253,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
                return ret;
        }
 
-       bus = mdiobus_alloc();
+       bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv));
        if (!bus)
                return -ENOMEM;
 
@@ -231,12 +264,19 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
        snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
 
        /* Set the PHY base address */
-       bus->priv = of_iomap(np, 0);
-       if (!bus->priv) {
+       priv = bus->priv;
+       priv->mdio_base = of_iomap(np, 0);
+       if (!priv->mdio_base) {
                ret = -ENOMEM;
                goto err_ioremap;
        }
 
+       if (of_get_property(pdev->dev.of_node,
+                           "little-endian", NULL))
+               priv->is_little_endian = true;
+       else
+               priv->is_little_endian = false;
+
        ret = of_mdiobus_register(bus, np);
        if (ret) {
                dev_err(&pdev->dev, "cannot register MDIO bus\n");
@@ -248,7 +288,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
        return 0;
 
 err_registration:
-       iounmap(bus->priv);
+       iounmap(priv->mdio_base);
 
 err_ioremap:
        mdiobus_free(bus);
@@ -267,7 +307,7 @@ static int xgmac_mdio_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id xgmac_mdio_match[] = {
+static const struct of_device_id xgmac_mdio_match[] = {
        {
                .compatible = "fsl,fman-xmdio",
        },
index c05e50759621137fa3f9749a55c347381d54ed55..291c87036e173c792a1b26234e2b99e8cd67bf65 100644 (file)
@@ -103,7 +103,7 @@ static int ehea_probe_adapter(struct platform_device *dev);
 
 static int ehea_remove(struct platform_device *dev);
 
-static struct of_device_id ehea_module_device_table[] = {
+static const struct of_device_id ehea_module_device_table[] = {
        {
                .name = "lhea",
                .compatible = "IBM,lhea",
@@ -116,7 +116,7 @@ static struct of_device_id ehea_module_device_table[] = {
 };
 MODULE_DEVICE_TABLE(of, ehea_module_device_table);
 
-static struct of_device_id ehea_device_table[] = {
+static const struct of_device_id ehea_device_table[] = {
        {
                .name = "lhea",
                .compatible = "IBM,lhea",
index 162762d1a12cb1ffcb34a2325150278650168c29..8a17b97baa201a8f2b87c66c4e6857d9c914b3b3 100644 (file)
@@ -2981,7 +2981,7 @@ static int emac_remove(struct platform_device *ofdev)
 }
 
 /* XXX Features in here should be replaced by properties... */
-static struct of_device_id emac_match[] =
+static const struct of_device_id emac_match[] =
 {
        {
                .type           = "network",
index dddaab11a4c71284fa62a4f6d5af7ed91bf5b703..fdb5cdb3cd15322ed226827b73052beac92c5173 100644 (file)
@@ -753,7 +753,7 @@ static int mal_remove(struct platform_device *ofdev)
        return 0;
 }
 
-static struct of_device_id mal_platform_match[] =
+static const struct of_device_id mal_platform_match[] =
 {
        {
                .compatible     = "ibm,mcmal",
index 457088fc5b06b7d296b54a575fd4220b40ead9b5..206ccbbae7bb474ce34a4c3ea0a3cfe7888adc6d 100644 (file)
@@ -305,7 +305,7 @@ static int rgmii_remove(struct platform_device *ofdev)
        return 0;
 }
 
-static struct of_device_id rgmii_match[] =
+static const struct of_device_id rgmii_match[] =
 {
        {
                .compatible     = "ibm,rgmii",
index cb18e7f917c68d58eec3f31b937465b66e56a900..32cb6c9007c55c959f36eb6bc58a7e8d53236ebe 100644 (file)
@@ -148,7 +148,7 @@ static int tah_remove(struct platform_device *ofdev)
        return 0;
 }
 
-static struct of_device_id tah_match[] =
+static const struct of_device_id tah_match[] =
 {
        {
                .compatible     = "ibm,tah",
index 36409ccb75ea86472df71d2a3d03f03bea560a8c..8727b865ea0219949b77f49f7f3547ec575bf1a6 100644 (file)
@@ -295,7 +295,7 @@ static int zmii_remove(struct platform_device *ofdev)
        return 0;
 }
 
-static struct of_device_id zmii_match[] =
+static const struct of_device_id zmii_match[] =
 {
        {
                .compatible     = "ibm,zmii",
index e9c3a87e5b115dc690ef2b81bbe16a5480dae5b1..05f88394f9a5599dcc1a4076a05694e596e6fdbe 100644 (file)
@@ -414,7 +414,7 @@ enum cb_status {
 
 /**
  * cb_command - Command Block flags
- * @cb_tx_nc:  0: controler does CRC (normal),  1: CRC from skb memory
+ * @cb_tx_nc:  0: controller does CRC (normal),  1: CRC from skb memory
  */
 enum cb_command {
        cb_nop    = 0x0000,
index 7f997d36948f3e59621b3698b1a43b36166ecf93..b548ef0cf56be1278daa2d92edcde4fed51f9541 100644 (file)
@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                     struct e1000_rx_ring *rx_ring,
                                     int *work_done, int work_to_do);
+static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
+                                        struct e1000_rx_ring *rx_ring,
+                                        int cleaned_count)
+{
+}
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                   struct e1000_rx_ring *rx_ring,
                                   int cleaned_count);
@@ -516,6 +521,7 @@ void e1000_down(struct e1000_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        u32 rctl, tctl;
 
+       netif_carrier_off(netdev);
 
        /* disable receives in the hardware */
        rctl = er32(RCTL);
@@ -544,7 +550,6 @@ void e1000_down(struct e1000_adapter *adapter)
 
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
-       netif_carrier_off(netdev);
 
        e1000_reset(adapter);
        e1000_clean_all_tx_rings(adapter);
@@ -1111,7 +1116,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                if (e1000_read_mac_addr(hw))
                        e_err(probe, "EEPROM Read Error\n");
        }
-       /* don't block initalization here due to bad MAC address */
+       /* don't block initialization here due to bad MAC address */
        memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
 
        if (!is_valid_ether_addr(netdev->dev_addr))
@@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                msleep(1);
        /* e1000_down has a dependency on max_frame_size */
        hw->max_frame_size = max_frame;
-       if (netif_running(netdev))
+       if (netif_running(netdev)) {
+               /* prevent buffers from being reallocated */
+               adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
                e1000_down(adapter);
+       }
 
        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more, this pushes us to allocate from the next
index bb7ab3c321d61316e90405e301bb5a3e962e638b..0570c668ec3dd6f89cdb4c8ac7aa8edc271b710b 100644 (file)
 #define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
 #define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
 #define E1000_RCTL_RDMTS_HALF     0x00000000    /* Rx desc min threshold size */
+#define E1000_RCTL_RDMTS_HEX      0x00010000
 #define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
 #define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
 #define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
index 9416e5a7e0c82262b078ebda3b1374b8ce58934d..a69f09e37b5893a02b2e0d521806769ea6f951c5 100644 (file)
@@ -132,6 +132,7 @@ enum e1000_boards {
        board_pchlan,
        board_pch2lan,
        board_pch_lpt,
+       board_pch_spt
 };
 
 struct e1000_ps_page {
@@ -501,6 +502,7 @@ extern const struct e1000_info e1000_ich10_info;
 extern const struct e1000_info e1000_pch_info;
 extern const struct e1000_info e1000_pch2_info;
 extern const struct e1000_info e1000_pch_lpt_info;
+extern const struct e1000_info e1000_pch_spt_info;
 extern const struct e1000_info e1000_es2_info;
 
 void e1000e_ptp_init(struct e1000_adapter *adapter);
index 865ce45f9ec3424733dd3db43d9e635183575604..11f486e4ff7b37ec2d96cb2272dad3bd3d09b68d 100644 (file)
@@ -896,18 +896,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
        case e1000_pchlan:
        case e1000_pch2lan:
        case e1000_pch_lpt:
+       case e1000_pch_spt:
                mask |= (1 << 18);
                break;
        default:
                break;
        }
 
-       if (mac->type == e1000_pch_lpt)
+       if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt))
                wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
                    E1000_FWSM_WLOCK_MAC_SHIFT;
 
        for (i = 0; i < mac->rar_entry_count; i++) {
-               if (mac->type == e1000_pch_lpt) {
+               if ((mac->type == e1000_pch_lpt) ||
+                   (mac->type == e1000_pch_spt)) {
                        /* Cannot test write-protected SHRAL[n] registers */
                        if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
                                continue;
index 72f5475c4b9093d075e608070c4415a4e0ad5afd..19e8c487db06d5c6c8ca0b330edaeb751bc4c364 100644 (file)
@@ -87,6 +87,10 @@ struct e1000_hw;
 #define E1000_DEV_ID_PCH_I218_V2               0x15A1
 #define E1000_DEV_ID_PCH_I218_LM3              0x15A2  /* Wildcat Point PCH */
 #define E1000_DEV_ID_PCH_I218_V3               0x15A3  /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM           0x156F  /* SPT PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V            0x1570  /* SPT PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM2          0x15B7  /* SPT-H PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V2           0x15B8  /* SPT-H PCH */
 
 #define E1000_REVISION_4       4
 
@@ -108,6 +112,7 @@ enum e1000_mac_type {
        e1000_pchlan,
        e1000_pch2lan,
        e1000_pch_lpt,
+       e1000_pch_spt,
 };
 
 enum e1000_media_type {
@@ -153,6 +158,7 @@ enum e1000_bus_width {
        e1000_bus_width_pcie_x1,
        e1000_bus_width_pcie_x2,
        e1000_bus_width_pcie_x4 = 4,
+       e1000_bus_width_pcie_x8 = 8,
        e1000_bus_width_32,
        e1000_bus_width_64,
        e1000_bus_width_reserved
index 48b74a5491551fcbb42afc9f29e012b553ea5079..9d81c03174334be84a3d605ff03417effc4c7ccd 100644 (file)
@@ -123,6 +123,14 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
                                         u16 *data);
 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                         u8 size, u16 *data);
+static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+                                          u32 *data);
+static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
+                                         u32 offset, u32 *data);
+static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
+                                           u32 offset, u32 data);
+static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+                                                u32 offset, u32 dword);
 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
@@ -229,7 +237,8 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
        if (ret_val)
                return false;
 out:
-       if (hw->mac.type == e1000_pch_lpt) {
+       if ((hw->mac.type == e1000_pch_lpt) ||
+           (hw->mac.type == e1000_pch_spt)) {
                /* Unforce SMBus mode in PHY */
                e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
                phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
@@ -321,6 +330,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
         */
        switch (hw->mac.type) {
        case e1000_pch_lpt:
+       case e1000_pch_spt:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;
 
@@ -461,6 +471,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
                        /* fall-through */
                case e1000_pch2lan:
                case e1000_pch_lpt:
+               case e1000_pch_spt:
                        /* In case the PHY needs to be in mdio slow mode,
                         * set slow mode and try to get the PHY id again.
                         */
@@ -590,35 +601,54 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        u32 gfpreg, sector_base_addr, sector_end_addr;
        u16 i;
-
-       /* Can't read flash registers if the register set isn't mapped. */
-       if (!hw->flash_address) {
-               e_dbg("ERROR: Flash registers not mapped\n");
-               return -E1000_ERR_CONFIG;
-       }
+       u32 nvm_size;
 
        nvm->type = e1000_nvm_flash_sw;
 
-       gfpreg = er32flash(ICH_FLASH_GFPREG);
+       if (hw->mac.type == e1000_pch_spt) {
+               /* in SPT, gfpreg doesn't exist. NVM size is taken from the
+                * STRAP register. This is because in SPT the GbE Flash region
+                * is no longer accessed through the flash registers. Instead,
+                * the mechanism has changed, and the Flash region access
+                * registers are now implemented in GbE memory space.
+                */
+               nvm->flash_base_addr = 0;
+               nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
+                   * NVM_SIZE_MULTIPLIER;
+               nvm->flash_bank_size = nvm_size / 2;
+               /* Adjust to word count */
+               nvm->flash_bank_size /= sizeof(u16);
+               /* Set the base address for flash register access */
+               hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
+       } else {
+               /* Can't read flash registers if register set isn't mapped. */
+               if (!hw->flash_address) {
+                       e_dbg("ERROR: Flash registers not mapped\n");
+                       return -E1000_ERR_CONFIG;
+               }
 
-       /* sector_X_addr is a "sector"-aligned address (4096 bytes)
-        * Add 1 to sector_end_addr since this sector is included in
-        * the overall size.
-        */
-       sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
-       sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+               gfpreg = er32flash(ICH_FLASH_GFPREG);
 
-       /* flash_base_addr is byte-aligned */
-       nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+               /* sector_X_addr is a "sector"-aligned address (4096 bytes)
+                * Add 1 to sector_end_addr since this sector is included in
+                * the overall size.
+                */
+               sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+               sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
 
-       /* find total size of the NVM, then cut in half since the total
-        * size represents two separate NVM banks.
-        */
-       nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
-                               << FLASH_SECTOR_ADDR_SHIFT);
-       nvm->flash_bank_size /= 2;
-       /* Adjust to word count */
-       nvm->flash_bank_size /= sizeof(u16);
+               /* flash_base_addr is byte-aligned */
+               nvm->flash_base_addr = sector_base_addr
+                   << FLASH_SECTOR_ADDR_SHIFT;
+
+               /* find total size of the NVM, then cut in half since the total
+                * size represents two separate NVM banks.
+                */
+               nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+                                       << FLASH_SECTOR_ADDR_SHIFT);
+               nvm->flash_bank_size /= 2;
+               /* Adjust to word count */
+               nvm->flash_bank_size /= sizeof(u16);
+       }
 
        nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
 
@@ -682,6 +712,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
                mac->ops.rar_set = e1000_rar_set_pch2lan;
                /* fall-through */
        case e1000_pch_lpt:
+       case e1000_pch_spt:
        case e1000_pchlan:
                /* check management mode */
                mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -699,7 +730,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
                break;
        }
 
-       if (mac->type == e1000_pch_lpt) {
+       if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) {
                mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
                mac->ops.rar_set = e1000_rar_set_pch_lpt;
                mac->ops.setup_physical_interface =
@@ -919,8 +950,9 @@ release:
                /* clear FEXTNVM6 bit 8 on link down or 10/100 */
                fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
 
-               if (!link || ((status & E1000_STATUS_SPEED_100) &&
-                             (status & E1000_STATUS_FD)))
+               if ((hw->phy.revision > 5) || !link ||
+                   ((status & E1000_STATUS_SPEED_100) &&
+                    (status & E1000_STATUS_FD)))
                        goto update_fextnvm6;
 
                ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
@@ -1100,6 +1132,21 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
        if (ret_val)
                goto out;
 
+       /* Si workaround for ULP entry flow on i127/rev6 h/w.  Enable
+        * LPLU and disable Gig speed when entering ULP
+        */
+       if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
+               ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
+                                                      &phy_reg);
+               if (ret_val)
+                       goto release;
+               phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
+               ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+                                                       phy_reg);
+               if (ret_val)
+                       goto release;
+       }
+
        /* Force SMBus mode in PHY */
        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
        if (ret_val)
@@ -1302,7 +1349,8 @@ out:
 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 {
        struct e1000_mac_info *mac = &hw->mac;
-       s32 ret_val;
+       s32 ret_val, tipg_reg = 0;
+       u16 emi_addr, emi_val = 0;
        bool link;
        u16 phy_reg;
 
@@ -1333,48 +1381,55 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
         * the IPG and reduce Rx latency in the PHY.
         */
        if (((hw->mac.type == e1000_pch2lan) ||
-            (hw->mac.type == e1000_pch_lpt)) && link) {
+            (hw->mac.type == e1000_pch_lpt) ||
+            (hw->mac.type == e1000_pch_spt)) && link) {
                u32 reg;
 
                reg = er32(STATUS);
+               tipg_reg = er32(TIPG);
+               tipg_reg &= ~E1000_TIPG_IPGT_MASK;
+
                if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
-                       u16 emi_addr;
+                       tipg_reg |= 0xFF;
+                       /* Reduce Rx latency in analog PHY */
+                       emi_val = 0;
+               } else {
 
-                       reg = er32(TIPG);
-                       reg &= ~E1000_TIPG_IPGT_MASK;
-                       reg |= 0xFF;
-                       ew32(TIPG, reg);
+                       /* Roll back the default values */
+                       tipg_reg |= 0x08;
+                       emi_val = 1;
+               }
 
-                       /* Reduce Rx latency in analog PHY */
-                       ret_val = hw->phy.ops.acquire(hw);
-                       if (ret_val)
-                               return ret_val;
+               ew32(TIPG, tipg_reg);
 
-                       if (hw->mac.type == e1000_pch2lan)
-                               emi_addr = I82579_RX_CONFIG;
-                       else
-                               emi_addr = I217_RX_CONFIG;
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       return ret_val;
 
-                       ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
+               if (hw->mac.type == e1000_pch2lan)
+                       emi_addr = I82579_RX_CONFIG;
+               else
+                       emi_addr = I217_RX_CONFIG;
+               ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
 
-                       hw->phy.ops.release(hw);
+               hw->phy.ops.release(hw);
 
-                       if (ret_val)
-                               return ret_val;
-               }
+               if (ret_val)
+                       return ret_val;
        }
 
        /* Work-around I218 hang issue */
        if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
-           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) ||
+           (hw->mac.type == e1000_pch_spt)) {
                ret_val = e1000_k1_workaround_lpt_lp(hw, link);
                if (ret_val)
                        return ret_val;
        }
-
-       if (hw->mac.type == e1000_pch_lpt) {
+       if ((hw->mac.type == e1000_pch_lpt) ||
+           (hw->mac.type == e1000_pch_spt)) {
                /* Set platform power management values for
                 * Latency Tolerance Reporting (LTR)
                 */
@@ -1386,6 +1441,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
        /* Clear link partner's EEE ability */
        hw->dev_spec.ich8lan.eee_lp_ability = 0;
 
+       /* FEXTNVM6 K1-off workaround */
+       if (hw->mac.type == e1000_pch_spt) {
+               u32 pcieanacfg = er32(PCIEANACFG);
+               u32 fextnvm6 = er32(FEXTNVM6);
+
+               if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
+                       fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
+               else
+                       fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+
+               ew32(FEXTNVM6, fextnvm6);
+       }
+
        if (!link)
                return 0;       /* No link detected */
 
@@ -1479,6 +1547,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
        case e1000_pchlan:
        case e1000_pch2lan:
        case e1000_pch_lpt:
+       case e1000_pch_spt:
                rc = e1000_init_phy_params_pchlan(hw);
                break;
        default:
@@ -1929,6 +1998,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
        case e1000_pchlan:
        case e1000_pch2lan:
        case e1000_pch_lpt:
+       case e1000_pch_spt:
                sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
                break;
        default:
@@ -2961,6 +3031,20 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
        s32 ret_val;
 
        switch (hw->mac.type) {
+               /* In SPT, read from the CTRL_EXT reg instead of
+                * accessing the sector valid bits from the nvm
+                */
+       case e1000_pch_spt:
+               *bank = er32(CTRL_EXT)
+                   & E1000_CTRL_EXT_NVMVS;
+               if ((*bank == 0) || (*bank == 1)) {
+                       e_dbg("ERROR: No valid NVM bank present\n");
+                       return -E1000_ERR_NVM;
+               } else {
+                       *bank = *bank - 2;
+                       return 0;
+               }
+               break;
        case e1000_ich8lan:
        case e1000_ich9lan:
                eecd = er32(EECD);
@@ -3007,6 +3091,99 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
        }
 }
 
+/**
+ *  e1000_read_nvm_spt - NVM access for SPT
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the word(s) to read.
+ *  @words: Size of data to read in words.
+ *  @data: pointer to the word(s) to read at offset.
+ *
+ *  Reads a word(s) from the NVM
+ **/
+static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
+                             u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+       u32 act_offset;
+       s32 ret_val = 0;
+       u32 bank = 0;
+       u32 dword = 0;
+       u16 offset_to_read;
+       u16 i;
+
+       if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+           (words == 0)) {
+               e_dbg("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       nvm->ops.acquire(hw);
+
+       ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+       if (ret_val) {
+               e_dbg("Could not detect valid bank, assuming bank 0\n");
+               bank = 0;
+       }
+
+       act_offset = (bank) ? nvm->flash_bank_size : 0;
+       act_offset += offset;
+
+       ret_val = 0;
+
+       for (i = 0; i < words; i += 2) {
+               if (words - i == 1) {
+                       if (dev_spec->shadow_ram[offset + i].modified) {
+                               data[i] =
+                                   dev_spec->shadow_ram[offset + i].value;
+                       } else {
+                               offset_to_read = act_offset + i -
+                                   ((act_offset + i) % 2);
+                               ret_val =
+                                 e1000_read_flash_dword_ich8lan(hw,
+                                                                offset_to_read,
+                                                                &dword);
+                               if (ret_val)
+                                       break;
+                               if ((act_offset + i) % 2 == 0)
+                                       data[i] = (u16)(dword & 0xFFFF);
+                               else
+                                       data[i] = (u16)((dword >> 16) & 0xFFFF);
+                       }
+               } else {
+                       offset_to_read = act_offset + i;
+                       if (!(dev_spec->shadow_ram[offset + i].modified) ||
+                           !(dev_spec->shadow_ram[offset + i + 1].modified)) {
+                               ret_val =
+                                 e1000_read_flash_dword_ich8lan(hw,
+                                                                offset_to_read,
+                                                                &dword);
+                               if (ret_val)
+                                       break;
+                       }
+                       if (dev_spec->shadow_ram[offset + i].modified)
+                               data[i] =
+                                   dev_spec->shadow_ram[offset + i].value;
+                       else
+                               data[i] = (u16)(dword & 0xFFFF);
+                       if (dev_spec->shadow_ram[offset + i].modified)
+                               data[i + 1] =
+                                   dev_spec->shadow_ram[offset + i + 1].value;
+                       else
+                               data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
+               }
+       }
+
+       nvm->ops.release(hw);
+
+out:
+       if (ret_val)
+               e_dbg("NVM read error: %d\n", ret_val);
+
+       return ret_val;
+}
+
 /**
  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
  *  @hw: pointer to the HW structure
@@ -3090,8 +3267,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
        /* Clear FCERR and DAEL in hw status by writing 1 */
        hsfsts.hsf_status.flcerr = 1;
        hsfsts.hsf_status.dael = 1;
-
-       ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+       if (hw->mac.type == e1000_pch_spt)
+               ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
+       else
+               ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 
        /* Either we should have a hardware SPI cycle in progress
         * bit to check against, in order to start a new cycle or
@@ -3107,7 +3286,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
                 * Begin by setting Flash Cycle Done.
                 */
                hsfsts.hsf_status.flcdone = 1;
-               ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+               if (hw->mac.type == e1000_pch_spt)
+                       ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
+               else
+                       ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
                ret_val = 0;
        } else {
                s32 i;
@@ -3128,7 +3310,11 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
                         * now set the Flash Cycle Done.
                         */
                        hsfsts.hsf_status.flcdone = 1;
-                       ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+                       if (hw->mac.type == e1000_pch_spt)
+                               ew32flash(ICH_FLASH_HSFSTS,
+                                         hsfsts.regval & 0xFFFF);
+                       else
+                               ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
                } else {
                        e_dbg("Flash controller busy, cannot get access\n");
                }
@@ -3151,9 +3337,16 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
        u32 i = 0;
 
        /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
-       hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+       if (hw->mac.type == e1000_pch_spt)
+               hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+       else
+               hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
        hsflctl.hsf_ctrl.flcgo = 1;
-       ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+       if (hw->mac.type == e1000_pch_spt)
+               ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+       else
+               ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
 
        /* wait till FDONE bit is set to 1 */
        do {
@@ -3169,6 +3362,23 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
        return -E1000_ERR_NVM;
 }
 
+/**
+ *  e1000_read_flash_dword_ich8lan - Read dword from flash
+ *  @hw: pointer to the HW structure
+ *  @offset: offset to data location
+ *  @data: pointer to the location for storing the data
+ *
+ *  Reads the flash dword at offset into data.  Offset is converted
+ *  to bytes before read.
+ **/
+static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
+                                         u32 *data)
+{
+       /* Must convert word offset into bytes. */
+       offset <<= 1;
+       return e1000_read_flash_data32_ich8lan(hw, offset, data);
+}
+
 /**
  *  e1000_read_flash_word_ich8lan - Read word from flash
  *  @hw: pointer to the HW structure
@@ -3201,7 +3411,14 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
        s32 ret_val;
        u16 word = 0;
 
-       ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+       /* In SPT, only 32 bits access is supported,
+        * so this function should not be called.
+        */
+       if (hw->mac.type == e1000_pch_spt)
+               return -E1000_ERR_NVM;
+       else
+               ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+
        if (ret_val)
                return ret_val;
 
@@ -3286,6 +3503,82 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
        return ret_val;
 }
 
+/**
+ *  e1000_read_flash_data32_ich8lan - Read dword from NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the dword to read.
+ *  @data: Pointer to the dword to store the value read.
+ *
+ *  Reads a dword from the NVM using the flash access registers.
+ **/
+
+static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+                                          u32 *data)
+{
+       union ich8_hws_flash_status hsfsts;
+       union ich8_hws_flash_ctrl hsflctl;
+       u32 flash_linear_addr;
+       s32 ret_val = -E1000_ERR_NVM;
+       u8 count = 0;
+
+       if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
+           hw->mac.type != e1000_pch_spt)
+               return -E1000_ERR_NVM;
+       flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+                            hw->nvm.flash_base_addr);
+
+       do {
+               udelay(1);
+               /* Steps */
+               ret_val = e1000_flash_cycle_init_ich8lan(hw);
+               if (ret_val)
+                       break;
+               /* In SPT, This register is in Lan memory space, not flash.
+                * Therefore, only 32 bit access is supported
+                */
+               hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+
+               /* fldbcount is transfer size minus one: 3 selects a dword */
+               hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+               hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+               /* In SPT, This register is in Lan memory space, not flash.
+                * Therefore, only 32 bit access is supported
+                */
+               ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
+               ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+               ret_val =
+                  e1000_flash_cycle_ich8lan(hw,
+                                            ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+               /* Check if FCERR is set to 1, if set to 1, clear it
+                * and try the whole sequence a few more times, else
+                * read in (shift in) the Flash Data0, the order is
+                * least significant byte first msb to lsb
+                */
+               if (!ret_val) {
+                       *data = er32flash(ICH_FLASH_FDATA0);
+                       break;
+               } else {
+                       /* If we've gotten here, then things are probably
+                        * completely hosed, but if the error condition is
+                        * detected, it won't hurt to give it another try...
+                        * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+                        */
+                       hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+                       if (hsfsts.hsf_status.flcerr) {
+                               /* Repeat for some time before giving up. */
+                               continue;
+                       } else if (!hsfsts.hsf_status.flcdone) {
+                               e_dbg("Timeout error - flash cycle did not complete.\n");
+                               break;
+                       }
+               }
+       } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+       return ret_val;
+}
+
 /**
  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
  *  @hw: pointer to the HW structure
@@ -3321,7 +3614,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 }
 
 /**
- *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
  *  @hw: pointer to the HW structure
  *
  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
@@ -3331,13 +3624,13 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
  *  After a successful commit, the shadow ram is cleared and is ready for
  *  future writes.
  **/
-static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
 {
        struct e1000_nvm_info *nvm = &hw->nvm;
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
        s32 ret_val;
-       u16 data;
+       u32 dword = 0;
 
        ret_val = e1000e_update_nvm_checksum_generic(hw);
        if (ret_val)
@@ -3371,12 +3664,175 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
                if (ret_val)
                        goto release;
        }
-
-       for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+       for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) {
                /* Determine whether to write the value stored
                 * in the other NVM bank or a modified value stored
                 * in the shadow RAM
                 */
+               ret_val = e1000_read_flash_dword_ich8lan(hw,
+                                                        i + old_bank_offset,
+                                                        &dword);
+
+               if (dev_spec->shadow_ram[i].modified) {
+                       dword &= 0xffff0000;
+                       dword |= (dev_spec->shadow_ram[i].value & 0xffff);
+               }
+               if (dev_spec->shadow_ram[i + 1].modified) {
+                       dword &= 0x0000ffff;
+                       dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
+                                 << 16);
+               }
+               if (ret_val)
+                       break;
+
+               /* If the word is 0x13, then make sure the signature bits
+                * (15:14) are 11b until the commit has completed.
+                * This will allow us to write 10b which indicates the
+                * signature is valid.  We want to do this after the write
+                * has completed so that we don't mark the segment valid
+                * while the write is still in progress
+                */
+               if (i == E1000_ICH_NVM_SIG_WORD - 1)
+                       dword |= E1000_ICH_NVM_SIG_MASK << 16;
+
+               /* Convert offset to bytes. */
+               act_offset = (i + new_bank_offset) << 1;
+
+               usleep_range(100, 200);
+
+               /* Write the data to the new bank. Offset in words */
+               act_offset = i + new_bank_offset;
+               ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
+                                                               dword);
+               if (ret_val)
+                       break;
+       }
+
+       /* Don't bother writing the segment valid bits if sector
+        * programming failed.
+        */
+       if (ret_val) {
+               /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
+               e_dbg("Flash commit failed.\n");
+               goto release;
+       }
+
+       /* Finally validate the new segment by setting bit 15:14
+        * to 10b in word 0x13 , this can be done without an
+        * erase as well since these bits are 11 to start with
+        * and we need to change bit 14 to 0b
+        */
+       act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+
+       /* offset in words but we read a dword */
+       --act_offset;
+       ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+       if (ret_val)
+               goto release;
+
+       dword &= 0xBFFFFFFF;
+       ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+       if (ret_val)
+               goto release;
+
+       /* And invalidate the previously valid segment by setting
+        * its signature word (0x13) high_byte to 0b. This can be
+        * done without an erase because flash erase sets all bits
+        * to 1's. We can write 1's to 0's without an erase
+        */
+       act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+
+       /* offset in words but we read dword */
+       act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
+       ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+       if (ret_val)
+               goto release;
+
+       dword &= 0x00FFFFFF;
+       ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+       if (ret_val)
+               goto release;
+
+       /* Great!  Everything worked, we can now clear the cached entries. */
+       for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+               dev_spec->shadow_ram[i].modified = false;
+               dev_spec->shadow_ram[i].value = 0xFFFF;
+       }
+
+release:
+       nvm->ops.release(hw);
+
+       /* Reload the EEPROM, or else modifications will not appear
+        * until after the next adapter reset.
+        */
+       if (!ret_val) {
+               nvm->ops.reload(hw);
+               usleep_range(10000, 20000);
+       }
+
+out:
+       if (ret_val)
+               e_dbg("NVM update error: %d\n", ret_val);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  The NVM checksum is updated by calling the generic update_nvm_checksum,
+ *  which writes the checksum to the shadow ram.  The changes in the shadow
+ *  ram are then committed to the EEPROM by processing each bank at a time
+ *  checking for the modified bit and writing only the pending changes.
+ *  After a successful commit, the shadow ram is cleared and is ready for
+ *  future writes.
+ **/
+static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+       u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+       s32 ret_val;
+       u16 data = 0;
+
+       ret_val = e1000e_update_nvm_checksum_generic(hw);
+       if (ret_val)
+               goto out;
+
+       if (nvm->type != e1000_nvm_flash_sw)
+               goto out;
+
+       nvm->ops.acquire(hw);
+
+       /* We're writing to the opposite bank so if we're on bank 1,
+        * write to bank 0 etc.  We also need to erase the segment that
+        * is going to be written
+        */
+       ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+       if (ret_val) {
+               e_dbg("Could not detect valid bank, assuming bank 0\n");
+               bank = 0;
+       }
+
+       if (bank == 0) {
+               new_bank_offset = nvm->flash_bank_size;
+               old_bank_offset = 0;
+               ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+               if (ret_val)
+                       goto release;
+       } else {
+               old_bank_offset = nvm->flash_bank_size;
+               new_bank_offset = 0;
+               ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+               if (ret_val)
+                       goto release;
+       }
+       for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
                if (dev_spec->shadow_ram[i].modified) {
                        data = dev_spec->shadow_ram[i].value;
                } else {
@@ -3498,6 +3954,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
         */
        switch (hw->mac.type) {
        case e1000_pch_lpt:
+       case e1000_pch_spt:
                word = NVM_COMPAT;
                valid_csum_mask = NVM_COMPAT_VALID_CSUM;
                break;
@@ -3583,9 +4040,13 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
        s32 ret_val;
        u8 count = 0;
 
-       if (size < 1 || size > 2 || data > size * 0xff ||
-           offset > ICH_FLASH_LINEAR_ADDR_MASK)
-               return -E1000_ERR_NVM;
+       if (hw->mac.type == e1000_pch_spt) {
+               if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+                       return -E1000_ERR_NVM;
+       } else {
+               if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+                       return -E1000_ERR_NVM;
+       }
 
        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
                             hw->nvm.flash_base_addr);
@@ -3596,12 +4057,25 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                ret_val = e1000_flash_cycle_init_ich8lan(hw);
                if (ret_val)
                        break;
+               /* In SPT, This register is in Lan memory space, not
+                * flash.  Therefore, only 32 bit access is supported
+                */
+               if (hw->mac.type == e1000_pch_spt)
+                       hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+               else
+                       hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
 
-               hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
                /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
                hsflctl.hsf_ctrl.fldbcount = size - 1;
                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
-               ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+               /* In SPT, This register is in Lan memory space,
+                * not flash.  Therefore, only 32 bit access is
+                * supported
+                */
+               if (hw->mac.type == e1000_pch_spt)
+                       ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+               else
+                       ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
 
                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
 
@@ -3639,6 +4113,90 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
        return ret_val;
 }
 
+/**
+*  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
+*  @hw: pointer to the HW structure
+*  @offset: The offset (in bytes) of the dwords to read.
+*  @data: The 4 bytes to write to the NVM.
+*
+*  Writes one/two/four bytes to the NVM using the flash access registers.
+**/
+static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+                                           u32 data)
+{
+       union ich8_hws_flash_status hsfsts;
+       union ich8_hws_flash_ctrl hsflctl;
+       u32 flash_linear_addr;
+       s32 ret_val;
+       u8 count = 0;
+
+       if (hw->mac.type == e1000_pch_spt) {
+               if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
+                       return -E1000_ERR_NVM;
+       }
+       flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+                            hw->nvm.flash_base_addr);
+       do {
+               udelay(1);
+               /* Steps */
+               ret_val = e1000_flash_cycle_init_ich8lan(hw);
+               if (ret_val)
+                       break;
+
+               /* In SPT, This register is in Lan memory space, not
+                * flash.  Therefore, only 32 bit access is supported
+                */
+               if (hw->mac.type == e1000_pch_spt)
+                       hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
+                           >> 16;
+               else
+                       hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+
+               hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+               hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+
+               /* In SPT, This register is in Lan memory space,
+                * not flash.  Therefore, only 32 bit access is
+                * supported
+                */
+               if (hw->mac.type == e1000_pch_spt)
+                       ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+               else
+                       ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+               ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+               ew32flash(ICH_FLASH_FDATA0, data);
+
+               /* check if FCERR is set to 1 , if set to 1, clear it
+                * and try the whole sequence a few more times else done
+                */
+               ret_val =
+                  e1000_flash_cycle_ich8lan(hw,
+                                            ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+
+               if (!ret_val)
+                       break;
+
+               /* If we're here, then things are most likely
+                * completely hosed, but if the error condition
+                * is detected, it won't hurt to give it another
+                * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+                */
+               hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+               if (hsfsts.hsf_status.flcerr)
+                       /* Repeat for some time before giving up. */
+                       continue;
+               if (!hsfsts.hsf_status.flcdone) {
+                       e_dbg("Timeout error - flash cycle did not complete.\n");
+                       break;
+               }
+       } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+       return ret_val;
+}
+
 /**
  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
  *  @hw: pointer to the HW structure
@@ -3655,6 +4213,40 @@ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
        return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
 }
 
+/**
+*  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
+*  @hw: pointer to the HW structure
+*  @offset: The offset of the word to write.
+*  @dword: The dword to write to the NVM.
+*
+*  Writes a single dword to the NVM using the flash access registers.
+*  Goes through a retry algorithm before giving up.
+**/
+static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+                                                u32 offset, u32 dword)
+{
+       s32 ret_val;
+       u16 program_retries;
+
+       /* Must convert word offset into bytes. */
+       offset <<= 1;
+       ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+
+       if (!ret_val)
+               return ret_val;
+       for (program_retries = 0; program_retries < 100; program_retries++) {
+               e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset);
+               usleep_range(100, 200);
+               ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+               if (!ret_val)
+                       break;
+       }
+       if (program_retries == 100)
+               return -E1000_ERR_NVM;
+
+       return 0;
+}
+
 /**
  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
  *  @hw: pointer to the HW structure
@@ -3759,9 +4351,18 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
                        /* Write a value 11 (block Erase) in Flash
                         * Cycle field in hw flash control
                         */
-                       hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+                       if (hw->mac.type == e1000_pch_spt)
+                               hsflctl.regval =
+                                   er32flash(ICH_FLASH_HSFSTS) >> 16;
+                       else
+                               hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+
                        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
-                       ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+                       if (hw->mac.type == e1000_pch_spt)
+                               ew32flash(ICH_FLASH_HSFSTS,
+                                         hsflctl.regval << 16);
+                       else
+                               ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
 
                        /* Write the last 24 bits of an index within the
                         * block into Flash Linear address field in Flash
@@ -4180,7 +4781,8 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
        ew32(RFCTL, reg);
 
        /* Enable ECC on Lynxpoint */
-       if (hw->mac.type == e1000_pch_lpt) {
+       if ((hw->mac.type == e1000_pch_lpt) ||
+           (hw->mac.type == e1000_pch_spt)) {
                reg = er32(PBECCSTS);
                reg |= E1000_PBECCSTS_ECC_ENABLE;
                ew32(PBECCSTS, reg);
@@ -4583,7 +5185,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
                if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
                    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
                    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
-                   (device_id == E1000_DEV_ID_PCH_I218_V3)) {
+                   (device_id == E1000_DEV_ID_PCH_I218_V3) ||
+                   (hw->mac.type == e1000_pch_spt)) {
                        u32 fextnvm6 = er32(FEXTNVM6);
 
                        ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
@@ -5058,6 +5661,17 @@ static const struct e1000_nvm_operations ich8_nvm_ops = {
        .write                  = e1000_write_nvm_ich8lan,
 };
 
+static const struct e1000_nvm_operations spt_nvm_ops = {
+       .acquire                = e1000_acquire_nvm_ich8lan,
+       .release                = e1000_release_nvm_ich8lan,
+       .read                   = e1000_read_nvm_spt,
+       .update                 = e1000_update_nvm_checksum_spt,
+       .reload                 = e1000e_reload_nvm_generic,
+       .valid_led_default      = e1000_valid_led_default_ich8lan,
+       .validate               = e1000_validate_nvm_checksum_ich8lan,
+       .write                  = e1000_write_nvm_ich8lan,
+};
+
 const struct e1000_info e1000_ich8_info = {
        .mac                    = e1000_ich8lan,
        .flags                  = FLAG_HAS_WOL
@@ -5166,3 +5780,23 @@ const struct e1000_info e1000_pch_lpt_info = {
        .phy_ops                = &ich8_phy_ops,
        .nvm_ops                = &ich8_nvm_ops,
 };
+
+const struct e1000_info e1000_pch_spt_info = {
+       .mac                    = e1000_pch_spt,
+       .flags                  = FLAG_IS_ICH
+                                 | FLAG_HAS_WOL
+                                 | FLAG_HAS_HW_TIMESTAMP
+                                 | FLAG_HAS_CTRLEXT_ON_LOAD
+                                 | FLAG_HAS_AMT
+                                 | FLAG_HAS_FLASH
+                                 | FLAG_HAS_JUMBO_FRAMES
+                                 | FLAG_APME_IN_WUC,
+       .flags2                 = FLAG2_HAS_PHY_STATS
+                                 | FLAG2_HAS_EEE,
+       .pba                    = 26,
+       .max_hw_frame_size      = 9018,
+       .get_variants           = e1000_get_variants_ich8lan,
+       .mac_ops                = &ich8_mac_ops,
+       .phy_ops                = &ich8_phy_ops,
+       .nvm_ops                = &spt_nvm_ops,
+};
index 8066a498eaac5439d18c7f6d99307039a53e7d80..770a573b9eea6c7dd1302ff06996c4025b2854f7 100644 (file)
 
 #define E1000_FEXTNVM6_REQ_PLL_CLK     0x00000100
 #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION       0x00000200
+#define E1000_FEXTNVM6_K1_OFF_ENABLE   0x80000000
+/* bit for disabling packet buffer read */
+#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
 
 #define E1000_FEXTNVM7_DISABLE_SMB_PERST       0x00000020
 
+#define K1_ENTRY_LATENCY       0
+#define K1_MIN_TIME            1
+#define NVM_SIZE_MULTIPLIER 4096       /*multiplier for NVMS field */
+#define E1000_FLASH_BASE_ADDR 0xE000   /*offset of NVM access regs */
+#define E1000_CTRL_EXT_NVMVS 0x3       /*NVM valid sector */
+
 #define PCIE_ICH8_SNOOP_ALL    PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES  7
index 1e8c40fd5c3d8fbc582dc19c4f71766023901968..4e56c31959897402bf581131af693be5375311e1 100644 (file)
@@ -70,6 +70,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
        [board_pchlan]          = &e1000_pch_info,
        [board_pch2lan]         = &e1000_pch2_info,
        [board_pch_lpt]         = &e1000_pch_lpt_info,
+       [board_pch_spt]         = &e1000_pch_spt_info,
 };
 
 struct e1000_reg_info {
@@ -1796,7 +1797,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
        }
 
        /* Reset on uncorrectable ECC error */
-       if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+       if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
+                                       (hw->mac.type == e1000_pch_spt))) {
                u32 pbeccsts = er32(PBECCSTS);
 
                adapter->corr_errors +=
@@ -1876,7 +1878,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
        }
 
        /* Reset on uncorrectable ECC error */
-       if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+       if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
+                                       (hw->mac.type == e1000_pch_spt))) {
                u32 pbeccsts = er32(PBECCSTS);
 
                adapter->corr_errors +=
@@ -2257,7 +2260,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
        if (adapter->msix_entries) {
                ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
                ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
-       } else if (hw->mac.type == e1000_pch_lpt) {
+       } else if ((hw->mac.type == e1000_pch_lpt) ||
+                  (hw->mac.type == e1000_pch_spt)) {
                ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
        } else {
                ew32(IMS, IMS_ENABLE_MASK);
@@ -3014,6 +3018,19 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
        ew32(TCTL, tctl);
 
        hw->mac.ops.config_collision_dist(hw);
+
+       /* SPT Si errata workaround to avoid data corruption */
+       if (hw->mac.type == e1000_pch_spt) {
+               u32 reg_val;
+
+               reg_val = er32(IOSFPC);
+               reg_val |= E1000_RCTL_RDMTS_HEX;
+               ew32(IOSFPC, reg_val);
+
+               reg_val = er32(TARC(0));
+               reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ;
+               ew32(TARC(0), reg_val);
+       }
 }
 
 /**
@@ -3490,8 +3507,11 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
        struct e1000_hw *hw = &adapter->hw;
        u32 incvalue, incperiod, shift;
 
-       /* Make sure clock is enabled on I217 before checking the frequency */
-       if ((hw->mac.type == e1000_pch_lpt) &&
+       /* Make sure clock is enabled on I217/I218/I219  before checking
+        * the frequency
+        */
+       if (((hw->mac.type == e1000_pch_lpt) ||
+            (hw->mac.type == e1000_pch_spt)) &&
            !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
            !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
                u32 fextnvm7 = er32(FEXTNVM7);
@@ -3505,10 +3525,13 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
        switch (hw->mac.type) {
        case e1000_pch2lan:
        case e1000_pch_lpt:
-               /* On I217, the clock frequency is 25MHz or 96MHz as
-                * indicated by the System Clock Frequency Indication
+       case e1000_pch_spt:
+               /* On I217, I218 and I219, the clock frequency is 25MHz
+                * or 96MHz as indicated by the System Clock Frequency
+                * Indication
                 */
-               if ((hw->mac.type != e1000_pch_lpt) ||
+               if (((hw->mac.type != e1000_pch_lpt) &&
+                    (hw->mac.type != e1000_pch_spt)) ||
                    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
                        /* Stable 96MHz frequency */
                        incperiod = INCPERIOD_96MHz;
@@ -3875,6 +3898,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
                break;
        case e1000_pch2lan:
        case e1000_pch_lpt:
+       case e1000_pch_spt:
                fc->refresh_time = 0x0400;
 
                if (adapter->netdev->mtu <= ETH_DATA_LEN) {
@@ -4060,6 +4084,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
         */
        set_bit(__E1000_DOWN, &adapter->state);
 
+       netif_carrier_off(netdev);
+
        /* disable receives in the hardware */
        rctl = er32(RCTL);
        if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
@@ -4084,8 +4110,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
 
-       netif_carrier_off(netdev);
-
        spin_lock(&adapter->stats64_lock);
        e1000e_update_stats(adapter);
        spin_unlock(&adapter->stats64_lock);
@@ -4759,7 +4783,8 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
        adapter->stats.mgpdc += er32(MGTPDC);
 
        /* Correctable ECC Errors */
-       if (hw->mac.type == e1000_pch_lpt) {
+       if ((hw->mac.type == e1000_pch_lpt) ||
+           (hw->mac.type == e1000_pch_spt)) {
                u32 pbeccsts = er32(PBECCSTS);
 
                adapter->corr_errors +=
@@ -6144,7 +6169,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 
        if (adapter->hw.phy.type == e1000_phy_igp_3) {
                e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
-       } else if (hw->mac.type == e1000_pch_lpt) {
+       } else if ((hw->mac.type == e1000_pch_lpt) ||
+                  (hw->mac.type == e1000_pch_spt)) {
                if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
                        /* ULP does not support wake from unicast, multicast
                         * or broadcast.
@@ -6807,7 +6833,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_ioremap;
 
        if ((adapter->flags & FLAG_HAS_FLASH) &&
-           (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+           (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
+           (hw->mac.type < e1000_pch_spt)) {
                flash_start = pci_resource_start(pdev, 1);
                flash_len = pci_resource_len(pdev, 1);
                adapter->hw.flash_address = ioremap(flash_start, flash_len);
@@ -6847,7 +6874,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_hw_init;
 
        if ((adapter->flags & FLAG_IS_ICH) &&
-           (adapter->flags & FLAG_READ_ONLY_NVM))
+           (adapter->flags & FLAG_READ_ONLY_NVM) &&
+           (hw->mac.type < e1000_pch_spt))
                e1000e_write_protect_nvm_ich8lan(&adapter->hw);
 
        hw->mac.ops.get_bus_info(&adapter->hw);
@@ -7043,7 +7071,7 @@ err_hw_init:
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
 err_sw_init:
-       if (adapter->hw.flash_address)
+       if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
                iounmap(adapter->hw.flash_address);
        e1000e_reset_interrupt_capability(adapter);
 err_flashmap:
@@ -7116,7 +7144,8 @@ static void e1000_remove(struct pci_dev *pdev)
        kfree(adapter->rx_ring);
 
        iounmap(adapter->hw.hw_addr);
-       if (adapter->hw.flash_address)
+       if ((adapter->hw.flash_address) &&
+           (adapter->hw.mac.type < e1000_pch_spt))
                iounmap(adapter->hw.flash_address);
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));
@@ -7213,6 +7242,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt },
 
        { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
 };
index 978ef9c4a043f9e9277de439939a75721f4ee5cf..8d7b21dc7e19955a2e87868b6c1b4b340feb5236 100644 (file)
@@ -106,20 +106,18 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
  * Read the timecounter and return the correct value in ns after converting
  * it into a struct timespec.
  **/
-static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
                                                     ptp_clock_info);
        unsigned long flags;
-       u32 remainder;
        u64 ns;
 
        spin_lock_irqsave(&adapter->systim_lock, flags);
        ns = timecounter_read(&adapter->tc);
        spin_unlock_irqrestore(&adapter->systim_lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
 
        return 0;
 }
@@ -133,14 +131,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
  * wall timer value.
  **/
 static int e1000e_phc_settime(struct ptp_clock_info *ptp,
-                             const struct timespec *ts)
+                             const struct timespec64 *ts)
 {
        struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
                                                     ptp_clock_info);
        unsigned long flags;
        u64 ns;
 
-       ns = timespec_to_ns(ts);
+       ns = timespec64_to_ns(ts);
 
        /* reset the timecounter */
        spin_lock_irqsave(&adapter->systim_lock, flags);
@@ -171,11 +169,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
        struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
                                                     systim_overflow_work.work);
        struct e1000_hw *hw = &adapter->hw;
-       struct timespec ts;
+       struct timespec64 ts;
 
-       adapter->ptp_clock_info.gettime(&adapter->ptp_clock_info, &ts);
+       adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
 
-       e_dbg("SYSTIM overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+       e_dbg("SYSTIM overflow check at %lld.%09lu\n",
+             (long long) ts.tv_sec, ts.tv_nsec);
 
        schedule_delayed_work(&adapter->systim_overflow_work,
                              E1000_SYSTIM_OVERFLOW_PERIOD);
@@ -190,8 +189,8 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
        .pps            = 0,
        .adjfreq        = e1000e_phc_adjfreq,
        .adjtime        = e1000e_phc_adjtime,
-       .gettime        = e1000e_phc_gettime,
-       .settime        = e1000e_phc_settime,
+       .gettime64      = e1000e_phc_gettime,
+       .settime64      = e1000e_phc_settime,
        .enable         = e1000e_phc_enable,
 };
 
@@ -221,7 +220,9 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
        switch (hw->mac.type) {
        case e1000_pch2lan:
        case e1000_pch_lpt:
-               if ((hw->mac.type != e1000_pch_lpt) ||
+       case e1000_pch_spt:
+               if (((hw->mac.type != e1000_pch_lpt) &&
+                    (hw->mac.type != e1000_pch_spt)) ||
                    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
                        adapter->ptp_clock_info.max_adj = 24000000 - 1;
                        break;
index ea235bbe50d3c3d32361f505cd98ddfcec5d9744..85eefc4832ba1172cadca45a5f97ff6c2d5dd9af 100644 (file)
@@ -38,6 +38,7 @@
 #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
 #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
 #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_PCIEANACFG       0x00F18 /* PCIE Analog Config */
 #define E1000_FCT      0x00030 /* Flow Control Type - RW */
 #define E1000_VET      0x00038 /* VLAN Ether Type - RW */
 #define E1000_ICR      0x000C0 /* Interrupt Cause Read - R/clr */
@@ -67,6 +68,7 @@
 #define E1000_PBA      0x01000 /* Packet Buffer Allocation - RW */
 #define E1000_PBS      0x01008 /* Packet Buffer Size */
 #define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_IOSFPC   0x00F28 /* TX corrupted data  */
 #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
 #define E1000_EEWR     0x0102C /* EEPROM Write Register - RW */
 #define E1000_FLOP     0x0103C /* FLASH Opcode Register */
                                 (0x054E4 + ((_i - 16) * 8)))
 #define E1000_SHRAL(_i)                (0x05438 + ((_i) * 8))
 #define E1000_SHRAH(_i)                (0x0543C + ((_i) * 8))
+#define E1000_TARC0_CB_MULTIQ_3_REQ    (1 << 28 | 1 << 29)
 #define E1000_TDFH             0x03410 /* Tx Data FIFO Head - RW */
 #define E1000_TDFT             0x03418 /* Tx Data FIFO Tail - RW */
 #define E1000_TDFHS            0x03420 /* Tx Data FIFO Head Saved - RW */
index 42eb4344a9dc077c52bfa97723d99af2bbcbe06b..59edfd4446cdaf4daf95659ec0d7bec6bf6c0916 100644 (file)
@@ -439,6 +439,7 @@ extern char fm10k_driver_name[];
 extern const char fm10k_driver_version[];
 int fm10k_init_queueing_scheme(struct fm10k_intfc *interface);
 void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface);
+__be16 fm10k_tx_encap_offload(struct sk_buff *skb);
 netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
                                  struct fm10k_ring *tx_ring);
 void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
index bf19dccd4288d8693197520d65b2f01251d891c0..6cfae6ac04eac0984037e1620394f14bc85d3fce 100644 (file)
@@ -398,7 +398,7 @@ static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw,
        /* Retrieve RX Owner Data */
        id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));
 
-       /* Process RX Ring*/
+       /* Process RX Ring */
        do {
                rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx),
                                                   &q->rx_drops);
@@ -466,7 +466,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
  *  Function invalidates the index values for the queues so any updates that
  *  may have happened are ignored and the base for the queue stats is reset.
  **/
-
 void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
 {
        u32 i;
index 651f53bc737686118e1f2fc457f452de59260998..33b6106c764becbd48e1dcdd4cda4e74b66c5497 100644 (file)
@@ -1019,7 +1019,7 @@ static int fm10k_set_channels(struct net_device *dev,
 }
 
 static int fm10k_get_ts_info(struct net_device *dev,
-                          struct ethtool_ts_info *info)
+                            struct ethtool_ts_info *info)
 {
        struct fm10k_intfc *interface = netdev_priv(dev);
 
index 0601908642389077b1df46546af52e4a734c8db0..a02308f5048fbf1911740c379f423a0336528b19 100644 (file)
@@ -275,7 +275,7 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
        if (vf_idx >= iov_data->num_vfs)
                return FM10K_ERR_PARAM;
 
-       /* determine if an update has occured and if so notify the VF */
+       /* determine if an update has occurred and if so notify the VF */
        vf_info = &iov_data->vf_info[vf_idx];
        if (vf_info->sw_vid != pvid) {
                vf_info->sw_vid = pvid;
index 84ab9eea2768406e2b3f9a22f946e7417160f795..c325bc0c83382c9f4254cbdb1fb1c4758969625b 100644 (file)
@@ -711,10 +711,6 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
        if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
                return NULL;
 
-       /* verify protocol is transparent Ethernet bridging */
-       if (nvgre_hdr->proto != htons(ETH_P_TEB))
-               return NULL;
-
        /* report start of ethernet header */
        if (nvgre_hdr->flags & NVGRE_TNI)
                return (struct ethhdr *)(nvgre_hdr + 1);
@@ -722,15 +718,13 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
        return (struct ethhdr *)(&nvgre_hdr->tni);
 }
 
-static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
+__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
 {
+       u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
        struct ethhdr *eth_hdr;
-       u8 l4_hdr = 0;
 
-/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */
-#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET       164
-       if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
-           FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
+       if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+           skb->inner_protocol != htons(ETH_P_TEB))
                return 0;
 
        switch (vlan_get_protocol(skb)) {
@@ -760,12 +754,33 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
 
        switch (eth_hdr->h_proto) {
        case htons(ETH_P_IP):
+               inner_l4_hdr = inner_ip_hdr(skb)->protocol;
+               break;
        case htons(ETH_P_IPV6):
+               inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
                break;
        default:
                return 0;
        }
 
+       switch (inner_l4_hdr) {
+       case IPPROTO_TCP:
+               inner_l4_hlen = inner_tcp_hdrlen(skb);
+               break;
+       case IPPROTO_UDP:
+               inner_l4_hlen = 8;
+               break;
+       default:
+               return 0;
+       }
+
+       /* The hardware allows tunnel offloads only if the combined inner and
+        * outer header is 184 bytes or less
+        */
+       if (skb_inner_transport_header(skb) + inner_l4_hlen -
+           skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
+               return 0;
+
        return eth_hdr->h_proto;
 }
 
@@ -934,10 +949,10 @@ static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
 {
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
+       /* Memory barrier before checking head and tail */
        smp_mb();
 
-       /* We need to check again in a case another CPU has just
-        * made room available. */
+       /* Check again in a case another CPU has just made room available */
        if (likely(fm10k_desc_unused(tx_ring) < size))
                return -EBUSY;
 
index 9f5457c9e627620dfe421bc173c85b24fddb67a4..14ee696e98308222460fe680b2c128d413eb6839 100644 (file)
@@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo)
  *  @fifo: pointer to FIFO
  *  @offset: offset to add to head
  *
- *  This function returns the indicies into the fifo based on head + offset
+ *  This function returns the indices into the fifo based on head + offset
  **/
 static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
 {
@@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
  *  @fifo: pointer to FIFO
  *  @offset: offset to add to tail
  *
- *  This function returns the indicies into the fifo based on tail + offset
+ *  This function returns the indices into the fifo based on tail + offset
  **/
 static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
 {
@@ -326,7 +326,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
  *  fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem
  *  @mbx: pointer to mailbox
  *
- *  This function will take a seciton of the Rx FIFO and copy it into the
+ *  This function will take a section of the Rx FIFO and copy it into the
 *  mailbox memory.  The offset in mbmem is based on the lower bits of the
  *  tail and len determines the length to copy.
@@ -418,7 +418,7 @@ static void fm10k_mbx_pull_head(struct fm10k_hw *hw,
  *  @hw: pointer to hardware structure
  *  @mbx: pointer to mailbox
  *
- *  This function will take a seciton of the mailbox memory and copy it
+ *  This function will take a section of the mailbox memory and copy it
  *  into the Rx FIFO.  The offset is based on the lower bits of the
  *  head and len determines the length to copy.
  **/
@@ -464,7 +464,7 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw,
  *  @tail: tail index of message
  *
  *  This function will first validate the tail index and size for the
- *  incoming message.  It then updates the acknowlegment number and
+ *  incoming message.  It then updates the acknowledgment number and
  *  copies the data into the FIFO.  It will return the number of messages
  *  dequeued on success and a negative value on error.
  **/
@@ -761,7 +761,7 @@ static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw,
                err = fm10k_fifo_enqueue(&mbx->tx, msg);
        }
 
-       /* if we failed trhead the error */
+       /* if we failed treat the error */
        if (err) {
                mbx->timeout = 0;
                mbx->tx_busy++;
@@ -815,7 +815,7 @@ static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
 {
        u32 mbmem = mbx->mbmem_reg;
 
-       /* write new msg header to notify recepient of change */
+       /* write new msg header to notify recipient of change */
        fm10k_write_reg(hw, mbmem, mbx->mbx_hdr);
 
        /* write mailbox to sent interrupt */
@@ -1251,7 +1251,7 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
        /* we will need to pull all of the fields for verification */
        head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
 
-       /* we only have lower 10 bits of error number os add upper bits */
+       /* we only have lower 10 bits of error number so add upper bits */
        err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO);
        err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO);
 
@@ -1548,7 +1548,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
        mbx->timeout = 0;
        mbx->udelay = FM10K_MBX_INIT_DELAY;
 
-       /* initalize tail and head */
+       /* initialize tail and head */
        mbx->tail = 1;
        mbx->head = 1;
 
@@ -1627,7 +1627,7 @@ static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx)
        mbx->local = FM10K_SM_MBX_VERSION;
        mbx->remote = 0;
 
-       /* initalize tail and head */
+       /* initialize tail and head */
        mbx->tail = 1;
        mbx->head = 1;
 
index cfde8bac1aeb2a23e4c447f786e06394f3ce285e..d5b303dad95e439258ab50f95ca3c9b73a4aa874 100644 (file)
@@ -356,7 +356,7 @@ static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface)
  * fm10k_request_glort_range - Request GLORTs for use in configuring rules
  * @interface: board private structure
  *
- * This function allocates a range of glorts for this inteface to use.
+ * This function allocates a range of glorts for this interface to use.
  **/
 static void fm10k_request_glort_range(struct fm10k_intfc *interface)
 {
@@ -781,7 +781,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
 
        fm10k_mbx_lock(interface);
 
-       /* only need to update the VLAN if not in promiscous mode */
+       /* only need to update the VLAN if not in promiscuous mode */
        if (!(netdev->flags & IFF_PROMISC)) {
                err = hw->mac.ops.update_vlan(hw, vid, 0, set);
                if (err)
@@ -970,7 +970,7 @@ static void fm10k_set_rx_mode(struct net_device *dev)
 
        fm10k_mbx_lock(interface);
 
-       /* syncronize all of the addresses */
+       /* synchronize all of the addresses */
        if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
                __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
                if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
@@ -1051,7 +1051,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
                                           vid, true, 0);
        }
 
-       /* syncronize all of the addresses */
+       /* synchronize all of the addresses */
        if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
                __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
                if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
@@ -1350,6 +1350,16 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
        }
 }
 
+static netdev_features_t fm10k_features_check(struct sk_buff *skb,
+                                             struct net_device *dev,
+                                             netdev_features_t features)
+{
+       if (!skb->encapsulation || fm10k_tx_encap_offload(skb))
+               return features;
+
+       return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+}
+
 static const struct net_device_ops fm10k_netdev_ops = {
        .ndo_open               = fm10k_open,
        .ndo_stop               = fm10k_close,
@@ -1372,6 +1382,7 @@ static const struct net_device_ops fm10k_netdev_ops = {
        .ndo_do_ioctl           = fm10k_ioctl,
        .ndo_dfwd_add_station   = fm10k_dfwd_add_station,
        .ndo_dfwd_del_station   = fm10k_dfwd_del_station,
+       .ndo_features_check     = fm10k_features_check,
 };
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
index 4f5892cc32d70c15b7911e0975eb6f39a5f5b6fb..8978d55a1c514d18bdd22de6988a00b7f8ca6b22 100644 (file)
@@ -648,7 +648,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
        /* Configure the Rx buffer size for one buff without split */
        srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;
 
-       /* Configure the Rx ring to supress loopback packets */
+       /* Configure the Rx ring to suppress loopback packets */
        srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
        fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);
 
index 7e4711958e463a959c69365fdb3d5c9ff956f3e4..159cd8463800bebfd584363013b77526bdb3161f 100644 (file)
@@ -234,8 +234,7 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
        vid = (vid << 17) >> 17;
 
        /* verify the reserved 0 fields are 0 */
-       if (len >= FM10K_VLAN_TABLE_VID_MAX ||
-           vid >= FM10K_VLAN_TABLE_VID_MAX)
+       if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
                return FM10K_ERR_PARAM;
 
        /* Loop through the table updating all required VLANs */
@@ -312,7 +311,7 @@ bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
 }
 
 /**
- *  fm10k_update_uc_addr_pf - Update device unicast addresss
+ *  fm10k_update_xc_addr_pf - Update device addresses
  *  @hw: pointer to the HW structure
  *  @glort: base resource tag for this request
  *  @mac: MAC address to add/remove from table
@@ -356,7 +355,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
 }
 
 /**
- *  fm10k_update_uc_addr_pf - Update device unicast addresss
+ *  fm10k_update_uc_addr_pf - Update device unicast addresses
  *  @hw: pointer to the HW structure
  *  @glort: base resource tag for this request
  *  @mac: MAC address to add/remove from table
@@ -454,7 +453,7 @@ static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
                        break;
        }
 
-       /* always reset VFITR2[0] to point to last enabled PF vector*/
+       /* always reset VFITR2[0] to point to last enabled PF vector */
        fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
 
        /* reset ITR2[0] to point to last enabled PF vector */
@@ -812,7 +811,7 @@ static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
        if (vf_idx >= hw->iov.num_vfs)
                return FM10K_ERR_PARAM;
 
-       /* determine vector offset and count*/
+       /* determine vector offset and count */
        vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
        vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
 
@@ -951,7 +950,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
        if (vf_info->mbx.ops.disconnect)
                vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
 
-       /* determine vector offset and count*/
+       /* determine vector offset and count */
        vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
        vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
 
@@ -1035,7 +1034,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
                        ((u32)vf_info->mac[2]);
        }
 
-       /* map queue pairs back to VF from last to first*/
+       /* map queue pairs back to VF from last to first */
        for (i = queues_per_pool; i--;) {
                fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
                fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
@@ -1141,7 +1140,7 @@ static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
  *
  *  This function is a default handler for MSI-X requests from the VF.  The
  *  assumption is that in this case it is acceptable to just directly
- *  hand off the message form the VF to the underlying shared code.
+ *  hand off the message from the VF to the underlying shared code.
  **/
 s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
                          struct fm10k_mbx_info *mbx)
@@ -1160,7 +1159,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
  *
  *  This function is a default handler for MAC/VLAN requests from the VF.
  *  The assumption is that in this case it is acceptable to just directly
- *  hand off the message form the VF to the underlying shared code.
+ *  hand off the message from the VF to the underlying shared code.
  **/
 s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                              struct fm10k_mbx_info *mbx)
@@ -1404,7 +1403,7 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
                                                    &stats->vlan_drop);
                loopback_drop = fm10k_read_hw_stats_32b(hw,
                                                        FM10K_STATS_LOOPBACK_DROP,
-                                                    &stats->loopback_drop);
+                                                       &stats->loopback_drop);
                nodesc_drop = fm10k_read_hw_stats_32b(hw,
                                                      FM10K_STATS_NODESC_DROP,
                                                      &stats->nodesc_drop);
@@ -1573,7 +1572,7 @@ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
        s32 ret_val = 0;
        u32 dma_ctrl2;
 
-       /* verify the switch is ready for interraction */
+       /* verify the switch is ready for interaction */
        dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
        if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
                goto out;
index d966044e017af043fdc24770d4d642700949102b..02008e976d186f754470340089f344e781e9bb04 100644 (file)
@@ -285,7 +285,7 @@ static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return 0;
 }
 
-static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        struct fm10k_intfc *interface;
        unsigned long flags;
@@ -297,17 +297,17 @@ static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
        now = fm10k_systime_read(interface) + interface->ptp_adjust;
        read_unlock_irqrestore(&interface->systime_lock, flags);
 
-       *ts = ns_to_timespec(now);
+       *ts = ns_to_timespec64(now);
 
        return 0;
 }
 
 static int fm10k_ptp_settime(struct ptp_clock_info *ptp,
-                            const struct timespec *ts)
+                            const struct timespec64 *ts)
 {
        struct fm10k_intfc *interface;
        unsigned long flags;
-       u64 ns = timespec_to_ns(ts);
+       u64 ns = timespec64_to_ns(ts);
 
        interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
 
@@ -419,8 +419,8 @@ void fm10k_ptp_register(struct fm10k_intfc *interface)
        ptp_caps->max_adj       = 976562;
        ptp_caps->adjfreq       = fm10k_ptp_adjfreq;
        ptp_caps->adjtime       = fm10k_ptp_adjtime;
-       ptp_caps->gettime       = fm10k_ptp_gettime;
-       ptp_caps->settime       = fm10k_ptp_settime;
+       ptp_caps->gettime64     = fm10k_ptp_gettime;
+       ptp_caps->settime64     = fm10k_ptp_settime;
 
        /* provide pins if BAR4 is accessible */
        if (interface->sw_addr) {
index fd0a05f011a863e48f47994577c9d9077d8148e3..9b29d7b0377a4302aea542a2afd99460707241f1 100644 (file)
@@ -710,7 +710,7 @@ void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags)
 /**
  *  fm10k_tlv_msg_test - Validate all results on test message receive
  *  @hw: Pointer to hardware structure
- *  @results: Pointer array to attributes in the mesage
+ *  @results: Pointer array to attributes in the message
  *  @mbx: Pointer to mailbox information structure
  *
  *  This function does a check to verify all attributes match what the test
index 7c6d9d5a8ae5c5042f8e68843a26ef770b45bbf5..4af96686c58407b8385113731685dbf1f7933749 100644 (file)
@@ -356,6 +356,9 @@ struct fm10k_hw;
 #define FM10K_QUEUE_DISABLE_TIMEOUT            100
 #define FM10K_RESET_TIMEOUT                    150
 
+/* Maximum supported combined inner and outer header length for encapsulation */
+#define FM10K_TUNNEL_HEADER_LENGTH     184
+
 /* VF registers */
 #define FM10K_VFCTRL           0x00000
 #define FM10K_VFCTRL_RST                       0x00000008
@@ -593,7 +596,7 @@ struct fm10k_vf_info {
        u16                     sw_vid;         /* Switch API assigned VLAN */
        u16                     pf_vid;         /* PF assigned Default VLAN */
        u8                      mac[ETH_ALEN];  /* PF Default MAC address */
-       u8                      vsi;            /* VSI idenfifier */
+       u8                      vsi;            /* VSI identifier */
        u8                      vf_idx;         /* which VF this is */
        u8                      vf_flags;       /* flags indicating what modes
                                                 * are supported for the port
index f0aa0f97b4a91d7f7cc4c07d990e2e04f06aafa1..17219678439abbfa2a3f2acd3dddd09c6ffee94e 100644 (file)
@@ -37,7 +37,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
        if (err)
                return err;
 
-       /* If permenant address is set then we need to restore it */
+       /* If permanent address is set then we need to restore it */
        if (is_valid_ether_addr(perm_addr)) {
                bal = (((u32)perm_addr[3]) << 24) |
                      (((u32)perm_addr[4]) << 16) |
@@ -65,7 +65,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
  *  fm10k_reset_hw_vf - VF hardware reset
  *  @hw: pointer to hardware structure
  *
- *  This function should return the hardare to a state similar to the
+ *  This function should return the hardware to a state similar to the
  *  one it is in after just being initialized.
  **/
 static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
@@ -252,7 +252,7 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
 }
 
 /**
- *  fm10k_update_uc_addr_vf - Update device unicast address
+ *  fm10k_update_uc_addr_vf - Update device unicast addresses
  *  @hw: pointer to the HW structure
  *  @glort: unused
  *  @mac: MAC address to add/remove from table
@@ -282,7 +282,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
            memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
                return FM10K_ERR_PARAM;
 
-       /* add bit to notify us if this is a set of clear operation */
+       /* add bit to notify us if this is a set or clear operation */
        if (!add)
                vid |= FM10K_VLAN_CLEAR;
 
@@ -295,7 +295,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
 }
 
 /**
- *  fm10k_update_mc_addr_vf - Update device multicast address
+ *  fm10k_update_mc_addr_vf - Update device multicast addresses
  *  @hw: pointer to the HW structure
  *  @glort: unused
  *  @mac: MAC address to add/remove from table
@@ -319,7 +319,7 @@ static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
        if (!is_multicast_ether_addr(mac))
                return FM10K_ERR_PARAM;
 
-       /* add bit to notify us if this is a set of clear operation */
+       /* add bit to notify us if this is a set or clear operation */
        if (!add)
                vid |= FM10K_VLAN_CLEAR;
 
@@ -515,7 +515,7 @@ static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
  *  @hw: pointer to the hardware structure
  *
  *  Function reads the content of 2 registers, combined to represent a 64 bit
- *  value measured in nanosecods.  In order to guarantee the value is accurate
+ *  value measured in nanoseconds.  In order to guarantee the value is accurate
  *  we check the 32 most significant bits both before and after reading the
  *  32 least significant bits to verify they didn't change as we were reading
  *  the registers.
index c405819991214e21a25b5a670d0097482fd643ab..b4729ba57c9c1e88f10fa2ceaecd32821e7d18e7 100644 (file)
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 - 2014 Intel Corporation.
+# Copyright(c) 2013 - 2015 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
index 2b65cdcad6bace52ba03934738599cb4aab314ef..33c35d3b7420fa9ae545aea4ebd5160036914718 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,7 @@
 #include <linux/aer.h>
 #include <linux/netdevice.h>
 #include <linux/ioport.h>
+#include <linux/iommu.h>
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/string.h>
@@ -49,6 +50,7 @@
 #include <net/ip6_checksum.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
 #include <linux/clocksource.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
@@ -70,6 +72,7 @@
 
 #define I40E_MAX_NUM_DESCRIPTORS      4096
 #define I40E_MAX_REGISTER     0x800000
+#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
 #define I40E_DEFAULT_NUM_DESCRIPTORS  512
 #define I40E_REQ_DESCRIPTOR_MULTIPLE  32
 #define I40E_MIN_NUM_DESCRIPTORS      64
@@ -94,6 +97,9 @@
 #define I40E_QUEUE_WAIT_RETRY_LIMIT   10
 #define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 9)
 
+/* Ethtool Private Flags */
+#define I40E_PRIV_FLAGS_NPAR_FLAG      (1 << 0)
+
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
 #define I40E_NVM_VERSION_HI_SHIFT  12
@@ -140,6 +146,7 @@ enum i40e_state_t {
        __I40E_CORE_RESET_REQUESTED,
        __I40E_GLOBAL_RESET_REQUESTED,
        __I40E_EMP_RESET_REQUESTED,
+       __I40E_EMP_RESET_INTR_RECEIVED,
        __I40E_FILTER_OVERFLOW_PROMISC,
        __I40E_SUSPENDED,
        __I40E_PTP_TX_IN_PROGRESS,
@@ -168,6 +175,9 @@ struct i40e_lump_tracking {
 #define I40E_FDIR_MAX_RAW_PACKET_SIZE  512
 #define I40E_FDIR_BUFFER_FULL_MARGIN   10
 #define I40E_FDIR_BUFFER_HEAD_ROOM     32
+#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
+
+#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
 
 enum i40e_fd_stat_idx {
        I40E_FD_STAT_ATR,
@@ -232,17 +242,17 @@ struct i40e_pf {
        bool fc_autoneg_status;
 
        u16 eeprom_version;
-       u16 num_vmdq_vsis;         /* num vmdq vsis this pf has set up */
+       u16 num_vmdq_vsis;         /* num vmdq vsis this PF has set up */
        u16 num_vmdq_qps;          /* num queue pairs per vmdq pool */
        u16 num_vmdq_msix;         /* num queue vectors per vmdq pool */
-       u16 num_req_vfs;           /* num vfs requested for this vf */
-       u16 num_vf_qps;            /* num queue pairs per vf */
+       u16 num_req_vfs;           /* num VFs requested for this VF */
+       u16 num_vf_qps;            /* num queue pairs per VF */
 #ifdef I40E_FCOE
-       u16 num_fcoe_qps;          /* num fcoe queues this pf has set up */
+       u16 num_fcoe_qps;          /* num fcoe queues this PF has set up */
        u16 num_fcoe_msix;         /* num queue vectors per fcoe pool */
 #endif /* I40E_FCOE */
-       u16 num_lan_qps;           /* num lan queues this pf has set up */
-       u16 num_lan_msix;          /* num queue vectors for the base pf vsi */
+       u16 num_lan_qps;           /* num lan queues this PF has set up */
+       u16 num_lan_msix;          /* num queue vectors for the base PF vsi */
        int queues_left;           /* queues left unclaimed */
        u16 rss_size;              /* num queues in the RSS array */
        u16 rss_size_max;          /* HW defined max RSS queues */
@@ -269,7 +279,7 @@ struct i40e_pf {
        enum i40e_interrupt_policy int_policy;
        u16 rx_itr_default;
        u16 tx_itr_default;
-       u16 msg_enable;
+       u32 msg_enable;
        char int_name[I40E_INT_NAME_STR_LEN];
        u16 adminq_work_limit; /* num of admin receive queue desc to process */
        unsigned long service_timer_period;
@@ -383,6 +393,9 @@ struct i40e_pf {
        bool ptp_tx;
        bool ptp_rx;
        u16 rss_table_size;
+       /* These are only valid in NPAR modes */
+       u32 npar_max_bw;
+       u32 npar_min_bw;
 };
 
 struct i40e_mac_filter {
@@ -405,6 +418,7 @@ struct i40e_veb {
        u16 uplink_seid;
        u16 stats_idx;           /* index of VEB parent */
        u8  enabled_tc;
+       u16 bridge_mode;        /* Bridge Mode (VEB/VEPA) */
        u16 flags;
        u16 bw_limit;
        u8  bw_max_quanta;
@@ -461,6 +475,9 @@ struct i40e_vsi {
        u16 rx_itr_setting;
        u16 tx_itr_setting;
 
+       u16 rss_table_size;
+       u16 rss_size;
+
        u16 max_frame;
        u16 rx_hdr_len;
        u16 rx_buf_len;
@@ -478,6 +495,7 @@ struct i40e_vsi {
 
        u16 base_queue;      /* vsi's first queue in hw array */
        u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+       u16 req_queue_pairs; /* User requested queue pairs */
        u16 num_queue_pairs; /* Used tx and rx pairs */
        u16 num_desc;
        enum i40e_vsi_type type;  /* VSI type, e.g., LAN, FCoE, etc */
@@ -504,6 +522,9 @@ struct i40e_vsi {
 
        /* VSI specific handlers */
        irqreturn_t (*irq_handler)(int irq, void *data);
+
+       /* current rxnfc data */
+       struct ethtool_rxnfc rxnfc; /* current rss hash opts */
 } ____cacheline_internodealigned_in_smp;
 
 struct i40e_netdev_priv {
@@ -544,14 +565,14 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
        static char buf[32];
 
        snprintf(buf, sizeof(buf),
-                "f%d.%d a%d.%d n%02x.%02x e%08x",
-                hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+                "f%d.%d.%05d a%d.%d n%x.%02x e%x",
+                hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
                 hw->aq.api_maj_ver, hw->aq.api_min_ver,
                 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
                        I40E_NVM_VERSION_HI_SHIFT,
                 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
                        I40E_NVM_VERSION_LO_SHIFT,
-                hw->nvm.eetrack);
+                (hw->nvm.eetrack & 0xffffff));
 
        return buf;
 }
@@ -593,7 +614,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
 
 /**
  * i40e_get_fd_cnt_all - get the total FD filter space available
- * @pf: pointer to the pf struct
+ * @pf: pointer to the PF struct
  **/
 static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
 {
@@ -607,6 +628,7 @@ extern const char i40e_driver_name[];
 extern const char i40e_driver_version_str[];
 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
 void i40e_update_stats(struct i40e_vsi *vsi);
 void i40e_update_eth_stats(struct i40e_vsi *vsi);
 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
@@ -618,9 +640,10 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
 int i40e_add_del_fdir(struct i40e_vsi *vsi,
                      struct i40e_fdir_filter *input, bool add);
 void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
-int i40e_get_current_fd_count(struct i40e_pf *pf);
-int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
-int i40e_get_current_atr_cnt(struct i40e_pf *pf);
+u32 i40e_get_current_fd_count(struct i40e_pf *pf);
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
+u32 i40e_get_global_fd_count(struct i40e_pf *pf);
 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
 void i40e_set_ethtool_ops(struct net_device *netdev);
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
@@ -680,6 +703,7 @@ int i40e_vlan_rx_add_vid(struct net_device *netdev,
 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
                          __always_unused __be16 proto, u16 vid);
 #endif
+int i40e_open(struct net_device *netdev);
 int i40e_vsi_open(struct i40e_vsi *vsi);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -690,7 +714,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
                                      bool is_vf, bool is_netdev);
 #ifdef I40E_FCOE
-int i40e_open(struct net_device *netdev);
 int i40e_close(struct net_device *netdev);
 int i40e_setup_tc(struct net_device *netdev, u8 tc);
 void i40e_netpoll(struct net_device *netdev);
@@ -712,6 +735,7 @@ void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
 #ifdef CONFIG_I40E_DCB
 void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+                          struct i40e_dcbx_config *old_cfg,
                           struct i40e_dcbx_config *new_cfg);
 void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
 void i40e_dcbnl_setup(struct i40e_vsi *vsi);
@@ -727,4 +751,8 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
 int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
 void i40e_ptp_init(struct i40e_pf *pf);
 void i40e_ptp_stop(struct i40e_pf *pf);
+int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
+i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
 #endif /* _I40E_H_ */
index 77f6254a89ac6078136e7cd92af1f910981c3f6f..3e0d20037675e84164ae9da793ddc080737e1d81 100644 (file)
@@ -592,6 +592,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &hw->aq.fw_maj_ver,
                                                        &hw->aq.fw_min_ver,
+                                                       &hw->aq.fw_build,
                                                        &hw->aq.api_maj_ver,
                                                        &hw->aq.api_min_ver,
                                                        NULL);
@@ -605,7 +606,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
                goto init_adminq_free_arq;
 
        /* get the NVM version info */
-       i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+       i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+                          &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
index de17b6fbcc4e2a2f42054a875f961f9344a27d9a..28e519a50de4063edcae851c6e94fce88687203b 100644 (file)
@@ -93,6 +93,7 @@ struct i40e_adminq_info {
        u16 asq_buf_size;               /* send queue buffer size */
        u16 fw_maj_ver;                 /* firmware major version */
        u16 fw_min_ver;                 /* firmware minor version */
+       u32 fw_build;                   /* firmware build number */
        u16 api_maj_ver;                /* api major version */
        u16 api_min_ver;                /* api minor version */
        bool nvm_release_on_done;
index 6aea65dae5ed654b5da2e7a8885a02a92c75710d..d596f6624025bc29c551e0093a7cbc2323c97efe 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -51,6 +51,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                case I40E_DEV_ID_QSFP_B:
                case I40E_DEV_ID_QSFP_C:
                case I40E_DEV_ID_10G_BASE_T:
+               case I40E_DEV_ID_20G_KR2:
                        hw->mac.type = I40E_MAC_XL710;
                        break;
                case I40E_DEV_ID_VF:
@@ -85,46 +86,53 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 {
        struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
        u16 len = le16_to_cpu(aq_desc->datalen);
-       u8 *aq_buffer = (u8 *)buffer;
-       u32 data[4];
-       u32 i = 0;
+       u8 *buf = (u8 *)buffer;
+       u16 i = 0;
 
        if ((!(mask & hw->debug_mask)) || (desc == NULL))
                return;
 
        i40e_debug(hw, mask,
                   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
-                  aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
-                  aq_desc->retval);
+                  le16_to_cpu(aq_desc->opcode),
+                  le16_to_cpu(aq_desc->flags),
+                  le16_to_cpu(aq_desc->datalen),
+                  le16_to_cpu(aq_desc->retval));
        i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
-                  aq_desc->cookie_high, aq_desc->cookie_low);
+                  le32_to_cpu(aq_desc->cookie_high),
+                  le32_to_cpu(aq_desc->cookie_low));
        i40e_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
-                  aq_desc->params.internal.param0,
-                  aq_desc->params.internal.param1);
+                  le32_to_cpu(aq_desc->params.internal.param0),
+                  le32_to_cpu(aq_desc->params.internal.param1));
        i40e_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
-                  aq_desc->params.external.addr_high,
-                  aq_desc->params.external.addr_low);
+                  le32_to_cpu(aq_desc->params.external.addr_high),
+                  le32_to_cpu(aq_desc->params.external.addr_low));
 
        if ((buffer != NULL) && (aq_desc->datalen != 0)) {
-               memset(data, 0, sizeof(data));
                i40e_debug(hw, mask, "AQ CMD Buffer:\n");
                if (buf_len < len)
                        len = buf_len;
-               for (i = 0; i < len; i++) {
-                       data[((i % 16) / 4)] |=
-                               ((u32)aq_buffer[i]) << (8 * (i % 4));
-                       if ((i % 16) == 15) {
-                               i40e_debug(hw, mask,
-                                          "\t0x%04X  %08X %08X %08X %08X\n",
-                                          i - 15, data[0], data[1], data[2],
-                                          data[3]);
-                               memset(data, 0, sizeof(data));
-                       }
+               /* write the full 16-byte chunks */
+               for (i = 0; i < (len - 16); i += 16)
+                       i40e_debug(hw, mask,
+                                  "\t0x%04X  %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+                                  i, buf[i], buf[i + 1], buf[i + 2],
+                                  buf[i + 3], buf[i + 4], buf[i + 5],
+                                  buf[i + 6], buf[i + 7], buf[i + 8],
+                                  buf[i + 9], buf[i + 10], buf[i + 11],
+                                  buf[i + 12], buf[i + 13], buf[i + 14],
+                                  buf[i + 15]);
+               /* write whatever's left over without overrunning the buffer */
+               if (i < len) {
+                       char d_buf[80];
+                       int j = 0;
+
+                       memset(d_buf, 0, sizeof(d_buf));
+                       j += sprintf(d_buf, "\t0x%04X ", i);
+                       while (i < len)
+                               j += sprintf(&d_buf[j], " %02X", buf[i++]);
+                       i40e_debug(hw, mask, "%s\n", d_buf);
                }
-               if ((i % 16) != 0)
-                       i40e_debug(hw, mask, "\t0x%04X  %08X %08X %08X %08X\n",
-                                  i - (i % 16), data[0], data[1], data[2],
-                                  data[3]);
        }
 }
 
@@ -534,7 +542,6 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
        I40E_PTT_UNUSED_ENTRY(255)
 };
 
-
 /**
  * i40e_init_shared_code - Initialize the shared code
  * @hw: pointer to hardware structure
@@ -685,7 +692,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 /**
  * i40e_pre_tx_queue_cfg - pre tx queue configure
  * @hw: pointer to the HW structure
- * @queue: target pf queue index
+ * @queue: target PF queue index
  * @enable: state change request
  *
  * Handles hw requirement to indicate intention to enable
@@ -827,12 +834,15 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
        case I40E_PHY_TYPE_10GBASE_CR1:
        case I40E_PHY_TYPE_40GBASE_CR4:
        case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+       case I40E_PHY_TYPE_40GBASE_AOC:
+       case I40E_PHY_TYPE_10GBASE_AOC:
                media = I40E_MEDIA_TYPE_DA;
                break;
        case I40E_PHY_TYPE_1000BASE_KX:
        case I40E_PHY_TYPE_10GBASE_KX4:
        case I40E_PHY_TYPE_10GBASE_KR:
        case I40E_PHY_TYPE_40GBASE_KR4:
+       case I40E_PHY_TYPE_20GBASE_KR2:
                media = I40E_MEDIA_TYPE_BACKPLANE;
                break;
        case I40E_PHY_TYPE_SGMII:
@@ -849,7 +859,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
 }
 
 #define I40E_PF_RESET_WAIT_COUNT_A0    200
-#define I40E_PF_RESET_WAIT_COUNT       110
+#define I40E_PF_RESET_WAIT_COUNT       200
 /**
  * i40e_pf_reset - Reset the PF
  * @hw: pointer to the hardware structure
@@ -947,7 +957,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
        u32 val;
        u32 eol = 0x7ff;
 
-       /* get number of interrupts, queues, and vfs */
+       /* get number of interrupts, queues, and VFs */
        val = rd32(hw, I40E_GLPCI_CNF2);
        num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
                     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
@@ -1076,8 +1086,11 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
        return gpio_val;
 }
 
-#define I40E_LED0 22
+#define I40E_COMBINED_ACTIVITY 0xA
+#define I40E_FILTER_ACTIVITY 0xE
 #define I40E_LINK_ACTIVITY 0xC
+#define I40E_MAC_ACTIVITY 0xD
+#define I40E_LED0 22
 
 /**
  * i40e_led_get - return current on/off mode
@@ -1090,6 +1103,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
  **/
 u32 i40e_led_get(struct i40e_hw *hw)
 {
+       u32 current_mode = 0;
        u32 mode = 0;
        int i;
 
@@ -1102,6 +1116,20 @@ u32 i40e_led_get(struct i40e_hw *hw)
                if (!gpio_val)
                        continue;
 
+               /* ignore gpio LED src mode entries related to the activity
+                * LEDs
+                */
+               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+                               >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+               switch (current_mode) {
+               case I40E_COMBINED_ACTIVITY:
+               case I40E_FILTER_ACTIVITY:
+               case I40E_MAC_ACTIVITY:
+                       continue;
+               default:
+                       break;
+               }
+
                mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
                        I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
                break;
@@ -1121,6 +1149,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
  **/
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 {
+       u32 current_mode = 0;
        int i;
 
        if (mode & 0xfffffff0)
@@ -1135,6 +1164,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
                if (!gpio_val)
                        continue;
 
+               /* ignore gpio LED src mode entries related to the activity
+                * LEDs
+                */
+               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+                               >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+               switch (current_mode) {
+               case I40E_COMBINED_ACTIVITY:
+               case I40E_FILTER_ACTIVITY:
+               case I40E_MAC_ACTIVITY:
+                       continue;
+               default:
+                       break;
+               }
+
                gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
                /* this & is a bit of paranoia, but serves as a range check */
                gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -1298,14 +1341,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
                        *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
        }
        /* Update the link info */
-       status = i40e_update_link_info(hw, true);
+       status = i40e_aq_get_link_info(hw, true, NULL, NULL);
        if (status) {
                /* Wait a little bit (on 40G cards it sometimes takes a really
                 * long time for link to come back from the atomic reset)
                 * and try once more
                 */
                msleep(1000);
-               status = i40e_update_link_info(hw, true);
+               status = i40e_aq_get_link_info(hw, true, NULL, NULL);
        }
        if (status)
                *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@@ -1441,6 +1484,10 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
        else
                hw_link_info->lse_enable = false;
 
+       if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+            hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+               hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
        /* save link status information */
        if (link)
                *link = *hw_link_info;
@@ -1452,35 +1499,6 @@ aq_get_link_info_exit:
        return status;
 }
 
-/**
- * i40e_update_link_info
- * @hw: pointer to the hw struct
- * @enable_lse: enable/disable LinkStatusEvent reporting
- *
- * Returns the link status of the adapter
- **/
-i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse)
-{
-       struct i40e_aq_get_phy_abilities_resp abilities;
-       i40e_status status;
-
-       status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL);
-       if (status)
-               return status;
-
-       status = i40e_aq_get_phy_capabilities(hw, false, false,
-                                             &abilities, NULL);
-       if (status)
-               return status;
-
-       if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED)
-               hw->phy.link_info.an_enabled = true;
-       else
-               hw->phy.link_info.an_enabled = false;
-
-       return status;
-}
-
 /**
  * i40e_aq_set_phy_int_mask
  * @hw: pointer to the hw struct
@@ -1760,6 +1778,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
  * @hw: pointer to the hw struct
  * @fw_major_version: firmware major version
  * @fw_minor_version: firmware minor version
+ * @fw_build: firmware build number
  * @api_major_version: major queue version
  * @api_minor_version: minor queue version
  * @cmd_details: pointer to command details structure or NULL
@@ -1768,6 +1787,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
  **/
 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
                                u16 *fw_major_version, u16 *fw_minor_version,
+                               u32 *fw_build,
                                u16 *api_major_version, u16 *api_minor_version,
                                struct i40e_asq_cmd_details *cmd_details)
 {
@@ -1781,13 +1801,15 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
        if (!status) {
-               if (fw_major_version != NULL)
+               if (fw_major_version)
                        *fw_major_version = le16_to_cpu(resp->fw_major);
-               if (fw_minor_version != NULL)
+               if (fw_minor_version)
                        *fw_minor_version = le16_to_cpu(resp->fw_minor);
-               if (api_major_version != NULL)
+               if (fw_build)
+                       *fw_build = le32_to_cpu(resp->fw_build);
+               if (api_major_version)
                        *api_major_version = le16_to_cpu(resp->api_major);
-               if (api_minor_version != NULL)
+               if (api_minor_version)
                        *api_minor_version = le16_to_cpu(resp->api_minor);
        }
 
@@ -1817,7 +1839,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
 
        i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
 
-       desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI);
+       desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
        cmd->driver_major_ver = dv->major_version;
        cmd->driver_minor_ver = dv->minor_version;
        cmd->driver_build_ver = dv->build_version;
@@ -1997,7 +2019,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
        if (count == 0 || !mv_list || !hw)
                return I40E_ERR_PARAM;
 
-       buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
+       buf_size = count * sizeof(*mv_list);
 
        /* prep the rest of the request */
        i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
@@ -2039,7 +2061,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
        if (count == 0 || !mv_list || !hw)
                return I40E_ERR_PARAM;
 
-       buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);
+       buf_size = count * sizeof(*mv_list);
 
        /* prep the rest of the request */
        i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
@@ -2061,7 +2083,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
 /**
  * i40e_aq_send_msg_to_vf
  * @hw: pointer to the hardware structure
- * @vfid: vf id to send msg
+ * @vfid: VF id to send msg
  * @v_opcode: opcodes for VF-PF communication
  * @v_retval: return error code
  * @msg: pointer to the msg buffer
@@ -2106,7 +2128,7 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
  * Read the register using the admin queue commands
  **/
 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
-                               u32  reg_addr, u64 *reg_val,
+                               u32 reg_addr, u64 *reg_val,
                                struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
@@ -2117,17 +2139,15 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
        if (reg_val == NULL)
                return I40E_ERR_PARAM;
 
-       i40e_fill_default_direct_cmd_desc(&desc,
-                                         i40e_aqc_opc_debug_read_reg);
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
 
        cmd_resp->address = cpu_to_le32(reg_addr);
 
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
        if (!status) {
-               *reg_val = ((u64)cmd_resp->value_high << 32) |
-                           (u64)cmd_resp->value_low;
-               *reg_val = le64_to_cpu(*reg_val);
+               *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
+                          (u64)le32_to_cpu(cmd_resp->value_low);
        }
 
        return status;
@@ -3376,6 +3396,47 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
        return status;
 }
 
+/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
+ * is not passed then only register at 'reg_addr0' is read.
+ *
+ **/
+static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+                                         u32 reg_addr0, u32 *reg_val0,
+                                         u32 reg_addr1, u32 *reg_val1)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_alternate_write *cmd_resp =
+               (struct i40e_aqc_alternate_write *)&desc.params.raw;
+       i40e_status status;
+
+       if (!reg_val0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+       cmd_resp->address0 = cpu_to_le32(reg_addr0);
+       cmd_resp->address1 = cpu_to_le32(reg_addr1);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+       if (!status) {
+               *reg_val0 = le32_to_cpu(cmd_resp->data0);
+
+               if (reg_val1)
+                       *reg_val1 = le32_to_cpu(cmd_resp->data1);
+       }
+
+       return status;
+}
+
 /**
  * i40e_aq_resume_port_tx
  * @hw: pointer to the hardware structure
@@ -3440,3 +3501,79 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
                break;
        }
 }
+
+/**
+ * i40e_read_bw_from_alt_ram
+ * @hw: pointer to the hardware structure
+ * @max_bw: pointer for max_bw read
+ * @min_bw: pointer for min_bw read
+ * @min_valid: pointer for bool that is true if min_bw is a valid value
+ * @max_valid: pointer for bool that is true if max_bw is a valid value
+ *
+ * Read bandwidth values from the alternate RAM for the given PF
+ **/
+i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+                                     u32 *max_bw, u32 *min_bw,
+                                     bool *min_valid, bool *max_valid)
+{
+       i40e_status status;
+       u32 max_bw_addr, min_bw_addr;
+
+       /* Calculate the address of the min/max bw registers */
+       max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+                     I40E_ALT_STRUCT_MAX_BW_OFFSET +
+                     (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+       min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+                     I40E_ALT_STRUCT_MIN_BW_OFFSET +
+                     (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+
+       /* Read the bandwidths from alt ram */
+       status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
+                                       min_bw_addr, min_bw);
+
+       if (*min_bw & I40E_ALT_BW_VALID_MASK)
+               *min_valid = true;
+       else
+               *min_valid = false;
+
+       if (*max_bw & I40E_ALT_BW_VALID_MASK)
+               *max_valid = true;
+       else
+               *max_valid = false;
+
+       return status;
+}
+
+/**
+ * i40e_aq_configure_partition_bw
+ * @hw: pointer to the hardware structure
+ * @bw_data: Buffer holding valid PFs and bandwidth limits
+ * @cmd_details: pointer to command details
+ *
+ * Configure partitions guaranteed/max bw
+ **/
+i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+                       struct i40e_aqc_configure_partition_bw_data *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       u16 bwd_size = sizeof(*bw_data);
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_configure_partition_bw);
+
+       /* Indirect command */
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+       if (bwd_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+       desc.datalen = cpu_to_le16(bwd_size);
+
+       status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
+                                      cmd_details);
+
+       return status;
+}
index 3ce43588592d99c7c5449b6cd0fb84a31177d84f..6e146675676097562985ea3cc7924562499938a3 100644 (file)
@@ -459,7 +459,7 @@ static void i40e_cee_to_dcb_v1_config(
        sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
        oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
        /* Add APPs if Error is False and Oper/Sync is True */
-       if (!err && sync && oper) {
+       if (!err) {
                /* CEE operating configuration supports FCoE/iSCSI/FIP only */
                dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
 
index a11c70ca5a2811c84cc094ac425b93ec0b840d5d..bd5079d5c1b682016db7a166c11c9a0e9f392b38 100644 (file)
@@ -178,6 +178,10 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
        if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
                return;
 
+       /* MFP mode but not an iSCSI PF so return */
+       if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+               return;
+
        dcbxcfg = &hw->local_dcbx_config;
 
        /* Set up all the App TLVs if DCBx is negotiated */
@@ -223,7 +227,7 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
 
 /**
  * i40e_dcbnl_del_app - Delete APP on all VSIs
- * @pf: the corresponding pf
+ * @pf: the corresponding PF
  * @app: APP to delete
  *
  * Delete given APP from all the VSIs for given PF
@@ -268,23 +272,26 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
 
 /**
  * i40e_dcbnl_flush_apps - Delete all removed APPs
- * @pf: the corresponding pf
+ * @pf: the corresponding PF
+ * @old_cfg: old DCBX configuration data
  * @new_cfg: new DCBX configuration data
  *
  * Find and delete all APPs that are not present in the passed
  * DCB configuration
  **/
 void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+                          struct i40e_dcbx_config *old_cfg,
                           struct i40e_dcbx_config *new_cfg)
 {
        struct i40e_dcb_app_priority_table app;
-       struct i40e_dcbx_config *dcbxcfg;
-       struct i40e_hw *hw = &pf->hw;
        int i;
 
-       dcbxcfg = &hw->local_dcbx_config;
-       for (i = 0; i < dcbxcfg->numapps; i++) {
-               app = dcbxcfg->app[i];
+       /* MFP mode but not an iSCSI PF so return */
+       if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+               return;
+
+       for (i = 0; i < old_cfg->numapps; i++) {
+               app = old_cfg->app[i];
                /* The APP is not available anymore delete it */
                if (!i40e_dcbnl_find_app(new_cfg, &app))
                        i40e_dcbnl_del_app(pf, &app);
@@ -306,9 +313,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
        if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
                return;
 
-       /* Do not setup DCB NL ops for MFP mode */
-       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
-               dev->dcbnl_ops = &dcbnl_ops;
+       dev->dcbnl_ops = &dcbnl_ops;
 
        /* Set initial IEEE DCB settings */
        i40e_dcbnl_set_all(vsi);
index c17ee77100d3651e254265ae192bd4d3e54c3659..daa88263af663eb0f88052602b9ec1cc4ce697e4 100644 (file)
@@ -35,7 +35,7 @@ static struct dentry *i40e_dbg_root;
 
 /**
  * i40e_dbg_find_vsi - searches for the vsi with the given seid
- * @pf - the pf structure to search for the vsi
+ * @pf - the PF structure to search for the vsi
  * @seid - seid of the vsi it is searching for
  **/
 static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
@@ -54,7 +54,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
 
 /**
  * i40e_dbg_find_veb - searches for the veb with the given seid
- * @pf - the pf structure to search for the veb
+ * @pf - the PF structure to search for the veb
  * @seid - seid of the veb it is searching for
  **/
 static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
@@ -112,7 +112,7 @@ static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
 
 /**
  * i40e_dbg_prep_dump_buf
- * @pf: the pf we're working with
+ * @pf: the PF we're working with
  * @buflen: the desired buffer length
  *
  * Return positive if success, 0 if failed
@@ -318,7 +318,7 @@ static const struct file_operations i40e_dbg_dump_fops = {
  * setup, adding or removing filters, or other things.  Many of
  * these will be useful for some forms of unit testing.
  **************************************************************/
-static char i40e_dbg_command_buf[256] = "hello world";
+static char i40e_dbg_command_buf[256] = "";
 
 /**
  * i40e_dbg_command_read - read for command datum
@@ -390,6 +390,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                 "    netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n",
                 vsi->netdev_registered,
                 vsi->current_netdev_flags, vsi->state, vsi->flags);
+       if (vsi == pf->vsi[pf->lan_vsi])
+               dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
+                        pf->hw.mac.addr,
+                        pf->hw.mac.san_addr,
+                        pf->hw.mac.port_addr);
        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                dev_info(&pf->pdev->dev,
                         "    mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
@@ -675,7 +680,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
                 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
        if (vsi->back)
-               dev_info(&pf->pdev->dev, "    pf = %p\n", vsi->back);
+               dev_info(&pf->pdev->dev, "    PF = %p\n", vsi->back);
        dev_info(&pf->pdev->dev, "    idx = %d\n", vsi->idx);
        dev_info(&pf->pdev->dev,
                 "    tc_config: numtc = %d, enabled_tc = 0x%x\n",
@@ -921,9 +926,10 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
                return;
        }
        dev_info(&pf->pdev->dev,
-                "veb idx=%d,%d stats_ic=%d  seid=%d uplink=%d\n",
+                "veb idx=%d,%d stats_ic=%d  seid=%d uplink=%d mode=%s\n",
                 veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
-                veb->uplink_seid);
+                veb->uplink_seid,
+                veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
        i40e_dbg_dump_eth_stats(pf, &veb->stats);
 }
 
@@ -945,7 +951,7 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
 
 /**
  * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
- * @pf: the pf that would be altered
+ * @pf: the PF that would be altered
  * @flag: flag that needs enabling or disabling
  * @enable: Enable/disable FD SD/ATR
  **/
@@ -957,7 +963,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
                pf->flags &= ~flag;
                pf->auto_disable_flags |= flag;
        }
-       dev_info(&pf->pdev->dev, "requesting a pf reset\n");
+       dev_info(&pf->pdev->dev, "requesting a PF reset\n");
        i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
 }
 
@@ -1487,11 +1493,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        } else {
                                dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
                        }
-               } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) {
-                       i40e_pf_reset_stats(pf);
-                       dev_info(&pf->pdev->dev, "pf clear stats called\n");
+               } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
+                       if (pf->hw.partition_id == 1) {
+                               i40e_pf_reset_stats(pf);
+                               dev_info(&pf->pdev->dev, "port stats cleared\n");
+                       } else {
+                               dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
+                       }
                } else {
-                       dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
+                       dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
                }
        } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
                struct i40e_aq_desc *desc;
@@ -1897,7 +1907,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  read <reg>\n");
                dev_info(&pf->pdev->dev, "  write <reg> <value>\n");
                dev_info(&pf->pdev->dev, "  clear_stats vsi [seid]\n");
-               dev_info(&pf->pdev->dev, "  clear_stats pf\n");
+               dev_info(&pf->pdev->dev, "  clear_stats port\n");
                dev_info(&pf->pdev->dev, "  pfr\n");
                dev_info(&pf->pdev->dev, "  corer\n");
                dev_info(&pf->pdev->dev, "  globr\n");
@@ -1935,7 +1945,7 @@ static const struct file_operations i40e_dbg_command_fops = {
  * The netdev_ops entry in debugfs is for giving the driver commands
  * to be executed from the netdev operations.
  **************************************************************/
-static char i40e_dbg_netdev_ops_buf[256] = "hello world";
+static char i40e_dbg_netdev_ops_buf[256] = "";
 
 /**
  * i40e_dbg_netdev_ops - read for netdev_ops datum
@@ -2123,8 +2133,8 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
 };
 
 /**
- * i40e_dbg_pf_init - setup the debugfs directory for the pf
- * @pf: the pf that is starting up
+ * i40e_dbg_pf_init - setup the debugfs directory for the PF
+ * @pf: the PF that is starting up
  **/
 void i40e_dbg_pf_init(struct i40e_pf *pf)
 {
@@ -2160,8 +2170,8 @@ create_failed:
 }
 
 /**
- * i40e_dbg_pf_exit - clear out the pf's debugfs entries
- * @pf: the pf that is stopping
+ * i40e_dbg_pf_exit - clear out the PF's debugfs entries
+ * @pf: the PF that is stopping
  **/
 void i40e_dbg_pf_exit(struct i40e_pf *pf)
 {
index b8230dc205ec7f01a2c1e481097a8f9554420d15..c848b186251277a6a1c691c19a26e3d8510634e5 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -113,7 +113,6 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
        I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
        I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
-       I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
        I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
        I40E_PF_STAT("crc_errors", stats.crc_errors),
        I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
@@ -218,6 +217,13 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
 
+static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+       "NPAR",
+};
+
+#define I40E_PRIV_FLAGS_STR_LEN \
+       (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN)
+
 /**
  * i40e_partition_setting_complaint - generic complaint for MFP restriction
  * @pf: the PF struct
@@ -229,73 +235,20 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
 }
 
 /**
- * i40e_get_settings - Get Link Speed and Duplex settings
+ * i40e_get_settings_link_up - Get the Link settings for when link is up
+ * @hw: hw structure
+ * @ecmd: ethtool command to fill in
  * @netdev: network interface device structure
- * @ecmd: ethtool command
  *
- * Reports speed/duplex settings based on media_type
  **/
-static int i40e_get_settings(struct net_device *netdev,
-                            struct ethtool_cmd *ecmd)
+static void i40e_get_settings_link_up(struct i40e_hw *hw,
+                                     struct ethtool_cmd *ecmd,
+                                     struct net_device *netdev)
 {
-       struct i40e_netdev_priv *np = netdev_priv(netdev);
-       struct i40e_pf *pf = np->vsi->back;
-       struct i40e_hw *hw = &pf->hw;
        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
-       bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
        u32 link_speed = hw_link_info->link_speed;
 
-       /* hardware is either in 40G mode or 10G mode
-        * NOTE: this section initializes supported and advertising
-        */
-       if (!link_up) {
-               /* link is down and the driver needs to fall back on
-                * device ID to determine what kinds of info to display,
-                * it's mostly a guess that may change when link is up
-                */
-               switch (hw->device_id) {
-               case I40E_DEV_ID_QSFP_A:
-               case I40E_DEV_ID_QSFP_B:
-               case I40E_DEV_ID_QSFP_C:
-                       /* pluggable QSFP */
-                       ecmd->supported = SUPPORTED_40000baseSR4_Full |
-                                         SUPPORTED_40000baseCR4_Full |
-                                         SUPPORTED_40000baseLR4_Full;
-                       ecmd->advertising = ADVERTISED_40000baseSR4_Full |
-                                           ADVERTISED_40000baseCR4_Full |
-                                           ADVERTISED_40000baseLR4_Full;
-                       break;
-               case I40E_DEV_ID_KX_B:
-                       /* backplane 40G */
-                       ecmd->supported = SUPPORTED_40000baseKR4_Full;
-                       ecmd->advertising = ADVERTISED_40000baseKR4_Full;
-                       break;
-               case I40E_DEV_ID_KX_C:
-                       /* backplane 10G */
-                       ecmd->supported = SUPPORTED_10000baseKR_Full;
-                       ecmd->advertising = ADVERTISED_10000baseKR_Full;
-                       break;
-               case I40E_DEV_ID_10G_BASE_T:
-                       ecmd->supported = SUPPORTED_10000baseT_Full |
-                                         SUPPORTED_1000baseT_Full |
-                                         SUPPORTED_100baseT_Full;
-                       ecmd->advertising = ADVERTISED_10000baseT_Full |
-                                           ADVERTISED_1000baseT_Full |
-                                           ADVERTISED_100baseT_Full;
-                       break;
-               default:
-                       /* all the rest are 10G/1G */
-                       ecmd->supported = SUPPORTED_10000baseT_Full |
-                                         SUPPORTED_1000baseT_Full;
-                       ecmd->advertising = ADVERTISED_10000baseT_Full |
-                                           ADVERTISED_1000baseT_Full;
-                       break;
-               }
-
-               /* skip phy_type use as it is zero when link is down */
-               goto no_valid_phy_type;
-       }
-
+       /* Initialize supported and advertised settings based on phy settings */
        switch (hw_link_info->phy_type) {
        case I40E_PHY_TYPE_40GBASE_CR4:
        case I40E_PHY_TYPE_40GBASE_CR4_CU:
@@ -304,6 +257,11 @@ static int i40e_get_settings(struct net_device *netdev,
                ecmd->advertising = ADVERTISED_Autoneg |
                                    ADVERTISED_40000baseCR4_Full;
                break;
+       case I40E_PHY_TYPE_XLAUI:
+       case I40E_PHY_TYPE_XLPPI:
+       case I40E_PHY_TYPE_40GBASE_AOC:
+               ecmd->supported = SUPPORTED_40000baseCR4_Full;
+               break;
        case I40E_PHY_TYPE_40GBASE_KR4:
                ecmd->supported = SUPPORTED_Autoneg |
                                  SUPPORTED_40000baseKR4_Full;
@@ -311,13 +269,17 @@ static int i40e_get_settings(struct net_device *netdev,
                                    ADVERTISED_40000baseKR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_SR4:
-       case I40E_PHY_TYPE_XLPPI:
-       case I40E_PHY_TYPE_XLAUI:
                ecmd->supported = SUPPORTED_40000baseSR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_LR4:
                ecmd->supported = SUPPORTED_40000baseLR4_Full;
                break;
+       case I40E_PHY_TYPE_20GBASE_KR2:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_20000baseKR2_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_20000baseKR2_Full;
+               break;
        case I40E_PHY_TYPE_10GBASE_KX4:
                ecmd->supported = SUPPORTED_Autoneg |
                                  SUPPORTED_10000baseKX4_Full;
@@ -334,55 +296,56 @@ static int i40e_get_settings(struct net_device *netdev,
        case I40E_PHY_TYPE_10GBASE_LR:
        case I40E_PHY_TYPE_1000BASE_SX:
        case I40E_PHY_TYPE_1000BASE_LX:
-               ecmd->supported = SUPPORTED_10000baseT_Full;
-               ecmd->supported |= SUPPORTED_1000baseT_Full;
+               ecmd->supported = SUPPORTED_10000baseT_Full |
+                                 SUPPORTED_1000baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_1000BASE_KX:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_1000baseKX_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_1000baseKX_Full;
                break;
-       case I40E_PHY_TYPE_10GBASE_CR1_CU:
-       case I40E_PHY_TYPE_10GBASE_CR1:
        case I40E_PHY_TYPE_10GBASE_T:
+       case I40E_PHY_TYPE_1000BASE_T:
+       case I40E_PHY_TYPE_100BASE_TX:
                ecmd->supported = SUPPORTED_Autoneg |
                                  SUPPORTED_10000baseT_Full |
                                  SUPPORTED_1000baseT_Full |
                                  SUPPORTED_100baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+                       ecmd->advertising |= ADVERTISED_100baseT_Full;
+               break;
+       case I40E_PHY_TYPE_10GBASE_CR1_CU:
+       case I40E_PHY_TYPE_10GBASE_CR1:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_10000baseT_Full;
                ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_10000baseT_Full |
-                                   ADVERTISED_1000baseT_Full |
-                                   ADVERTISED_100baseT_Full;
+                                   ADVERTISED_10000baseT_Full;
                break;
        case I40E_PHY_TYPE_XAUI:
        case I40E_PHY_TYPE_XFI:
        case I40E_PHY_TYPE_SFI:
        case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+       case I40E_PHY_TYPE_10GBASE_AOC:
                ecmd->supported = SUPPORTED_10000baseT_Full;
                break;
-       case I40E_PHY_TYPE_1000BASE_KX:
-       case I40E_PHY_TYPE_1000BASE_T:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full |
-                                 SUPPORTED_100baseT_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_10000baseT_Full |
-                                   ADVERTISED_1000baseT_Full |
-                                   ADVERTISED_100baseT_Full;
-               break;
-       case I40E_PHY_TYPE_100BASE_TX:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full |
-                                 SUPPORTED_100baseT_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_10000baseT_Full |
-                                   ADVERTISED_1000baseT_Full |
-                                   ADVERTISED_100baseT_Full;
-               break;
        case I40E_PHY_TYPE_SGMII:
                ecmd->supported = SUPPORTED_Autoneg |
                                  SUPPORTED_1000baseT_Full |
                                  SUPPORTED_100baseT_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_1000baseT_Full |
-                                   ADVERTISED_100baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+                       ecmd->advertising |= ADVERTISED_100baseT_Full;
                break;
        default:
                /* if we got here and link is up something bad is afoot */
@@ -390,8 +353,126 @@ static int i40e_get_settings(struct net_device *netdev,
                            hw_link_info->phy_type);
        }
 
-no_valid_phy_type:
-       /* this is if autoneg is enabled or disabled */
+       /* Set speed and duplex */
+       switch (link_speed) {
+       case I40E_LINK_SPEED_40GB:
+               /* need a SPEED_40000 in ethtool.h */
+               ethtool_cmd_speed_set(ecmd, 40000);
+               break;
+       case I40E_LINK_SPEED_20GB:
+               ethtool_cmd_speed_set(ecmd, SPEED_20000);
+               break;
+       case I40E_LINK_SPEED_10GB:
+               ethtool_cmd_speed_set(ecmd, SPEED_10000);
+               break;
+       case I40E_LINK_SPEED_1GB:
+               ethtool_cmd_speed_set(ecmd, SPEED_1000);
+               break;
+       case I40E_LINK_SPEED_100MB:
+               ethtool_cmd_speed_set(ecmd, SPEED_100);
+               break;
+       default:
+               break;
+       }
+       ecmd->duplex = DUPLEX_FULL;
+}
+
+/**
+ * i40e_get_settings_link_down - Get the Link settings for when link is down
+ * @hw: hw structure
+ * @ecmd: ethtool command to fill in
+ *
+ * Reports link settings that can be determined when link is down
+ **/
+static void i40e_get_settings_link_down(struct i40e_hw *hw,
+                                       struct ethtool_cmd *ecmd)
+{
+       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+
+       /* link is down and the driver needs to fall back on
+        * device ID to determine what kinds of info to display,
+        * it's mostly a guess that may change when link is up
+        */
+       switch (hw->device_id) {
+       case I40E_DEV_ID_QSFP_A:
+       case I40E_DEV_ID_QSFP_B:
+       case I40E_DEV_ID_QSFP_C:
+               /* pluggable QSFP */
+               ecmd->supported = SUPPORTED_40000baseSR4_Full |
+                                 SUPPORTED_40000baseCR4_Full |
+                                 SUPPORTED_40000baseLR4_Full;
+               ecmd->advertising = ADVERTISED_40000baseSR4_Full |
+                                   ADVERTISED_40000baseCR4_Full |
+                                   ADVERTISED_40000baseLR4_Full;
+               break;
+       case I40E_DEV_ID_KX_B:
+               /* backplane 40G */
+               ecmd->supported = SUPPORTED_40000baseKR4_Full;
+               ecmd->advertising = ADVERTISED_40000baseKR4_Full;
+               break;
+       case I40E_DEV_ID_KX_C:
+               /* backplane 10G */
+               ecmd->supported = SUPPORTED_10000baseKR_Full;
+               ecmd->advertising = ADVERTISED_10000baseKR_Full;
+               break;
+       case I40E_DEV_ID_10G_BASE_T:
+               ecmd->supported = SUPPORTED_10000baseT_Full |
+                                 SUPPORTED_1000baseT_Full |
+                                 SUPPORTED_100baseT_Full;
+               /* Figure out what has been requested */
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+                       ecmd->advertising |= ADVERTISED_100baseT_Full;
+               break;
+       case I40E_DEV_ID_20G_KR2:
+               /* backplane 20G */
+               ecmd->supported = SUPPORTED_20000baseKR2_Full;
+               ecmd->advertising = ADVERTISED_20000baseKR2_Full;
+               break;
+       default:
+               /* all the rest are 10G/1G */
+               ecmd->supported = SUPPORTED_10000baseT_Full |
+                                 SUPPORTED_1000baseT_Full;
+               /* Figure out what has been requested */
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               break;
+       }
+
+       /* With no link, speed and duplex are unknown */
+       ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+       ecmd->duplex = DUPLEX_UNKNOWN;
+}
+
+/**
+ * i40e_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings based on media_type
+ **/
+static int i40e_get_settings(struct net_device *netdev,
+                            struct ethtool_cmd *ecmd)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+       bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+
+       if (link_up)
+               i40e_get_settings_link_up(hw, ecmd, netdev);
+       else
+               i40e_get_settings_link_down(hw, ecmd);
+
+       /* Now set the settings that don't rely on link being up/down */
+
+       /* Set autoneg settings */
        ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
                          AUTONEG_ENABLE : AUTONEG_DISABLE);
 
@@ -424,11 +505,13 @@ no_valid_phy_type:
                break;
        }
 
+       /* Set transceiver */
        ecmd->transceiver = XCVR_EXTERNAL;
 
+       /* Set flow control settings */
        ecmd->supported |= SUPPORTED_Pause;
 
-       switch (hw->fc.current_mode) {
+       switch (hw->fc.requested_mode) {
        case I40E_FC_FULL:
                ecmd->advertising |= ADVERTISED_Pause;
                break;
@@ -445,30 +528,6 @@ no_valid_phy_type:
                break;
        }
 
-       if (link_up) {
-               switch (link_speed) {
-               case I40E_LINK_SPEED_40GB:
-                       /* need a SPEED_40000 in ethtool.h */
-                       ethtool_cmd_speed_set(ecmd, 40000);
-                       break;
-               case I40E_LINK_SPEED_10GB:
-                       ethtool_cmd_speed_set(ecmd, SPEED_10000);
-                       break;
-               case I40E_LINK_SPEED_1GB:
-                       ethtool_cmd_speed_set(ecmd, SPEED_1000);
-                       break;
-               case I40E_LINK_SPEED_100MB:
-                       ethtool_cmd_speed_set(ecmd, SPEED_100);
-                       break;
-               default:
-                       break;
-               }
-               ecmd->duplex = DUPLEX_FULL;
-       } else {
-               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
-               ecmd->duplex = DUPLEX_UNKNOWN;
-       }
-
        return 0;
 }
 
@@ -588,6 +647,8 @@ static int i40e_set_settings(struct net_device *netdev,
            advertise & ADVERTISED_10000baseKX4_Full ||
            advertise & ADVERTISED_10000baseKR_Full)
                config.link_speed |= I40E_LINK_SPEED_10GB;
+       if (advertise & ADVERTISED_20000baseKR2_Full)
+               config.link_speed |= I40E_LINK_SPEED_20GB;
        if (advertise & ADVERTISED_40000baseKR4_Full ||
            advertise & ADVERTISED_40000baseCR4_Full ||
            advertise & ADVERTISED_40000baseSR4_Full ||
@@ -601,6 +662,8 @@ static int i40e_set_settings(struct net_device *netdev,
                config.eeer = abilities.eeer_val;
                config.low_power_ctrl = abilities.d3_lpan;
 
+               /* save the requested speeds */
+               hw->phy.link_info.requested_speeds = config.link_speed;
                /* set link and auto negotiation so changes take effect */
                config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
                /* If link is up put link down */
@@ -621,7 +684,7 @@ static int i40e_set_settings(struct net_device *netdev,
                        return -EAGAIN;
                }
 
-               status = i40e_update_link_info(hw, true);
+               status = i40e_aq_get_link_info(hw, true, NULL, NULL);
                if (status)
                        netdev_info(netdev, "Updating link info failed with error %d\n",
                                    status);
@@ -767,7 +830,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
                err = -EAGAIN;
        }
        if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
-               netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n",
+               netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
                            status, hw->aq.asq_last_status);
                err = -EAGAIN;
        }
@@ -870,7 +933,9 @@ static int i40e_get_eeprom(struct net_device *netdev,
 
                cmd = (struct i40e_nvm_access *)eeprom;
                ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-               if (ret_val)
+               if (ret_val &&
+                   ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) ||
+                    (hw->debug_mask & I40E_DEBUG_NVM)))
                        dev_info(&pf->pdev->dev,
                                 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
                                 ret_val, hw->aq.asq_last_status, errno,
@@ -974,7 +1039,10 @@ static int i40e_set_eeprom(struct net_device *netdev,
 
        cmd = (struct i40e_nvm_access *)eeprom;
        ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-       if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY)
+       if (ret_val &&
+           ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM &&
+             hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) ||
+            (hw->debug_mask & I40E_DEBUG_NVM)))
                dev_info(&pf->pdev->dev,
                         "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
                         ret_val, hw->aq.asq_last_status, errno,
@@ -998,6 +1066,7 @@ static void i40e_get_drvinfo(struct net_device *netdev,
                sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
                sizeof(drvinfo->bus_info));
+       drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
 }
 
 static void i40e_get_ringparam(struct net_device *netdev,
@@ -1176,7 +1245,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
        case ETH_SS_TEST:
                return I40E_TEST_LEN;
        case ETH_SS_STATS:
-               if (vsi == pf->vsi[pf->lan_vsi]) {
+               if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
                        int len = I40E_PF_STATS_LEN(netdev);
 
                        if (pf->lan_veb != I40E_NO_VEB)
@@ -1185,6 +1254,8 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
                } else {
                        return I40E_VSI_STATS_LEN(netdev);
                }
+       case ETH_SS_PRIV_FLAGS:
+               return I40E_PRIV_FLAGS_STR_LEN;
        default:
                return -EOPNOTSUPP;
        }
@@ -1247,7 +1318,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                i += 2;
        }
        rcu_read_unlock();
-       if (vsi != pf->vsi[pf->lan_vsi])
+       if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
                return;
 
        if (pf->lan_veb != I40E_NO_VEB) {
@@ -1320,7 +1391,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
-               if (vsi != pf->vsi[pf->lan_vsi])
+               if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
                        return;
 
                if (pf->lan_veb != I40E_NO_VEB) {
@@ -1358,6 +1429,15 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                }
                /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
                break;
+       case ETH_SS_PRIV_FLAGS:
+               for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+                       memcpy(data, i40e_priv_flags_strings[i],
+                              ETH_GSTRING_LEN);
+                       data += ETH_GSTRING_LEN;
+               }
+               break;
+       default:
+               break;
        }
 }
 
@@ -1473,6 +1553,7 @@ static void i40e_diag_test(struct net_device *netdev,
                           struct ethtool_test *eth_test, u64 *data)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
+       bool if_running = netif_running(netdev);
        struct i40e_pf *pf = np->vsi->back;
 
        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
@@ -1480,6 +1561,12 @@ static void i40e_diag_test(struct net_device *netdev,
                netif_info(pf, drv, netdev, "offline testing starting\n");
 
                set_bit(__I40E_TESTING, &pf->state);
+               /* If the device is online then take it offline */
+               if (if_running)
+                       /* indicate we're in test mode */
+                       dev_close(netdev);
+               else
+                       i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
 
                /* Link test performed before hardware reset
                 * so autoneg doesn't interfere with test result
@@ -1502,6 +1589,9 @@ static void i40e_diag_test(struct net_device *netdev,
 
                clear_bit(__I40E_TESTING, &pf->state);
                i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+               if (if_running)
+                       dev_open(netdev);
        } else {
                /* Online tests */
                netif_info(pf, drv, netdev, "online testing starting\n");
@@ -1599,6 +1689,8 @@ static int i40e_set_phys_id(struct net_device *netdev,
        case ETHTOOL_ID_INACTIVE:
                i40e_led_set(hw, pf->led_status, false);
                break;
+       default:
+               break;
        }
 
        return 0;
@@ -1703,6 +1795,11 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
 {
        cmd->data = 0;
 
+       if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) {
+               cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data;
+               cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type;
+               return 0;
+       }
        /* Report default options for RSS on i40e */
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
@@ -1974,6 +2071,9 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
        i40e_flush(hw);
 
+       /* Save setting for future output/update */
+       pf->vsi[pf->lan_vsi]->rxnfc = *nfc;
+
        return 0;
 }
 
@@ -2281,10 +2381,6 @@ static int i40e_set_channels(struct net_device *dev,
        /* update feature limits from largest to smallest supported values */
        /* TODO: Flow director limit, DCB etc */
 
-       /* cap RSS limit */
-       if (count > pf->rss_size_max)
-               count = pf->rss_size_max;
-
        /* use rss_reconfig to rebuild with new queue count and update traffic
         * class queue mapping
         */
@@ -2295,6 +2391,133 @@ static int i40e_set_channels(struct net_device *dev,
                return -EINVAL;
 }
 
+#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+/**
+ * i40e_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the RSS hash key size.
+ **/
+static u32 i40e_get_rxfh_key_size(struct net_device *netdev)
+{
+       return I40E_HKEY_ARRAY_SIZE;
+}
+
+/**
+ * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
+{
+       return I40E_HLUT_ARRAY_SIZE;
+}
+
+static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+                        u8 *hfunc)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg_val;
+       int i, j;
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+       if (!indir)
+               return 0;
+
+       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+               reg_val = rd32(hw, I40E_PFQF_HLUT(i));
+               indir[j++] = reg_val & 0xff;
+               indir[j++] = (reg_val >> 8) & 0xff;
+               indir[j++] = (reg_val >> 16) & 0xff;
+               indir[j++] = (reg_val >> 24) & 0xff;
+       }
+
+       if (key) {
+               for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+                       reg_val = rd32(hw, I40E_PFQF_HKEY(i));
+                       key[j++] = (u8)(reg_val & 0xff);
+                       key[j++] = (u8)((reg_val >> 8) & 0xff);
+                       key[j++] = (u8)((reg_val >> 16) & 0xff);
+                       key[j++] = (u8)((reg_val >> 24) & 0xff);
+               }
+       }
+       return 0;
+}
+
+/**
+ * i40e_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
+                        const u8 *key, const u8 hfunc)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg_val;
+       int i, j;
+
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+
+       if (!indir)
+               return 0;
+
+       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+               reg_val = indir[j++];
+               reg_val |= indir[j++] << 8;
+               reg_val |= indir[j++] << 16;
+               reg_val |= indir[j++] << 24;
+               wr32(hw, I40E_PFQF_HLUT(i), reg_val);
+       }
+
+       if (key) {
+               for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+                       reg_val = key[j++];
+                       reg_val |= key[j++] << 8;
+                       reg_val |= key[j++] << 16;
+                       reg_val |= key[j++] << 24;
+                       wr32(hw, I40E_PFQF_HKEY(i), reg_val);
+               }
+       }
+       return 0;
+}
+
+/**
+ * i40e_get_priv_flags - report device private flags
+ * @dev: network interface device structure
+ *
+ * The get string set count and the string set should be matched for each
+ * flag returned.  Add new strings for each flag to the i40e_priv_flags_strings
+ * array.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40e_get_priv_flags(struct net_device *dev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       u32 ret_flags = 0;
+
+       ret_flags |= pf->hw.func_caps.npar_enable ?
+               I40E_PRIV_FLAGS_NPAR_FLAG : 0;
+
+       return ret_flags;
+}
+
 static const struct ethtool_ops i40e_ethtool_ops = {
        .get_settings           = i40e_get_settings,
        .set_settings           = i40e_set_settings,
@@ -2323,9 +2546,14 @@ static const struct ethtool_ops i40e_ethtool_ops = {
        .get_ethtool_stats      = i40e_get_ethtool_stats,
        .get_coalesce           = i40e_get_coalesce,
        .set_coalesce           = i40e_set_coalesce,
+       .get_rxfh_key_size      = i40e_get_rxfh_key_size,
+       .get_rxfh_indir_size    = i40e_get_rxfh_indir_size,
+       .get_rxfh               = i40e_get_rxfh,
+       .set_rxfh               = i40e_set_rxfh,
        .get_channels           = i40e_get_channels,
        .set_channels           = i40e_set_channels,
        .get_ts_info            = i40e_get_ts_info,
+       .get_priv_flags         = i40e_get_priv_flags,
 };
 
 void i40e_set_ethtool_ops(struct net_device *netdev)
index 27c206e62da71f0159361bca2703a1e367aeffcb..1803afeef23ede81ed906b5400e9f3164234a4de 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -24,7 +24,6 @@
  *
  ******************************************************************************/
 
-
 #include <linux/if_ether.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -150,7 +149,7 @@ static inline bool i40e_fcoe_xid_is_valid(u16 xid)
 
 /**
  * i40e_fcoe_ddp_unmap - unmap the mapped sglist associated
- * @pf: pointer to pf
+ * @pf: pointer to PF
  * @ddp: sw DDP context
  *
  * Unmap the scatter-gather list associated with the given SW DDP context
@@ -269,7 +268,7 @@ out:
 
 /**
  * i40e_fcoe_sw_init - sets up the HW for FCoE
- * @pf: pointer to pf
+ * @pf: pointer to PF
  *
  * Returns 0 if FCoE is supported otherwise the error code
  **/
@@ -329,7 +328,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
 
 /**
  * i40e_get_fcoe_tc_map - Return TC map for FCoE APP
- * @pf: pointer to pf
+ * @pf: pointer to PF
  *
  **/
 u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
@@ -381,12 +380,11 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
        ctxt->pf_num = hw->pf_id;
        ctxt->vf_num = 0;
        ctxt->uplink_seid = vsi->uplink_seid;
-       ctxt->connection_type = 0x1;
+       ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
        ctxt->flags = I40E_AQ_VSI_TYPE_PF;
 
        /* FCoE VSI would need the following sections */
-       info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID |
-                                           I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+       info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
 
        /* FCoE VSI does not need these sections */
        info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID |
@@ -395,7 +393,12 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
                                            I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
                                            I40E_AQ_VSI_PROP_EGRESS_UP_VALID));
 
-       info->switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+       if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+               info->valid_sections |=
+                               cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+               info->switch_id =
+                               cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+       }
        enabled_tc = i40e_get_fcoe_tc_map(pf);
        i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);
 
@@ -1303,8 +1306,7 @@ static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
        /* MACLEN is ether header length in words not bytes */
        td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
-       return i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-                          td_cmd, td_offset);
+       i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset);
 }
 
 /**
@@ -1443,7 +1445,6 @@ static int i40e_fcoe_set_features(struct net_device *netdev,
        return 0;
 }
 
-
 static const struct net_device_ops i40e_fcoe_netdev_ops = {
        .ndo_open               = i40e_open,
        .ndo_stop               = i40e_close,
@@ -1470,6 +1471,11 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = {
        .ndo_set_features       = i40e_fcoe_set_features,
 };
 
+/* fcoe network device type */
+static struct device_type fcoe_netdev_type = {
+       .name = "fcoe",
+};
+
 /**
  * i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI
  * @vsi: pointer to the associated VSI struct
@@ -1503,6 +1509,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
        strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1);
        netdev->mtu = FCOE_MTU;
        SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+       SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type);
        /* set different dev_port value 1 for FCoE netdev than the default
         * zero dev_port value for PF netdev, this helps biosdevname user
         * tool to differentiate them correctly while both attached to the
@@ -1523,7 +1530,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
 
 /**
  * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI
- * @pf: the pf that VSI is associated with
+ * @pf: the PF that VSI is associated with
  *
  **/
 void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
@@ -1550,7 +1557,7 @@ void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
        vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0);
        if (vsi) {
                dev_dbg(&pf->pdev->dev,
-                       "Successfully created FCoE VSI seid %d id %d uplink_seid %d pf seid %d\n",
+                       "Successfully created FCoE VSI seid %d id %d uplink_seid %d PF seid %d\n",
                        vsi->seid, vsi->id, vsi->uplink_seid, seid);
        } else {
                dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n");
index 21e0f582031c5fffb1b2634ac86fdb06c04b6ce2..0d49e2d15d408c671c3acf581b10df5763fee7c3 100644 (file)
@@ -37,7 +37,6 @@
 #define I40E_FILTER_CONTEXT_DESC(R, i)  \
        (&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i]))
 
-
 /* receive queue descriptor filter status for FCoE */
 #define I40E_RX_DESC_FLTSTAT_FCMASK    0x3
 #define I40E_RX_DESC_FLTSTAT_NOMTCH    0x0     /* no ddp context match */
index 4627588f461346f292e8e03aca4967f622dbdc6c..0079ad7bcd0e1ff9c5fb985322a72d635418df14 100644 (file)
@@ -856,7 +856,7 @@ static void i40e_write_dword(u8 *hmc_bits,
        if (ce_info->width < 32)
                mask = ((u32)1 << ce_info->width) - 1;
        else
-               mask = 0xFFFFFFFF;
+               mask = ~(u32)0;
 
        /* don't swizzle the bits until after the mask because the mask bits
         * will be in a different bit position on big endian machines
@@ -908,7 +908,7 @@ static void i40e_write_qword(u8 *hmc_bits,
        if (ce_info->width < 64)
                mask = ((u64)1 << ce_info->width) - 1;
        else
-               mask = 0xFFFFFFFFFFFFFFFF;
+               mask = ~(u64)0;
 
        /* don't swizzle the bits until after the mask because the mask bits
         * will be in a different bit position on big endian machines
index dadda3c5d658b950cf64d21c838f5ffd5176f0f6..63de3f4b7a94e4f2b6ec5454028e5bc0f5e99140 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 6
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 1
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -75,6 +75,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
        /* required last entry */
        {0, }
 };
@@ -248,6 +249,22 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
        return count;
 }
 
+/**
+ * i40e_find_vsi_from_id - searches for the vsi with the given id
+ * @pf - the pf structure to search for the vsi
+ * @id - id of the vsi it is searching for
+ **/
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
+{
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vsi; i++)
+               if (pf->vsi[i] && (pf->vsi[i]->id == id))
+                       return pf->vsi[i];
+
+       return NULL;
+}
+
 /**
  * i40e_service_event_schedule - Schedule the service task to wake up
  * @pf: board private structure
@@ -450,7 +467,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
 }
 
 /**
- * i40e_pf_reset_stats - Reset all of the stats for the given pf
+ * i40e_pf_reset_stats - Reset all of the stats for the given PF
  * @pf: the PF to be reset
  **/
 void i40e_pf_reset_stats(struct i40e_pf *pf)
@@ -896,7 +913,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 }
 
 /**
- * i40e_update_pf_stats - Update the pf statistics counters.
+ * i40e_update_pf_stats - Update the PF statistics counters.
  * @pf: the PF to be updated
  **/
 static void i40e_update_pf_stats(struct i40e_pf *pf)
@@ -919,11 +936,6 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_discards,
                           &nsd->eth.rx_discards);
-       i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
-                          pf->stat_offsets_loaded,
-                          &osd->eth.tx_discards,
-                          &nsd->eth.tx_discards);
-
        i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
                           I40E_GLPRT_UPRCL(hw->port),
                           pf->stat_offsets_loaded,
@@ -1133,7 +1145,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
  * @vsi: the VSI to be searched
  * @macaddr: the MAC address
  * @vlan: the vlan
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure its a VF filter, else doesn't matter
  * @is_netdev: make sure its a netdev filter, else doesn't matter
  *
  * Returns ptr to the filter object or NULL
@@ -1161,7 +1173,7 @@ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
  * i40e_find_mac - Find a mac addr in the macvlan filters list
  * @vsi: the VSI to be searched
  * @macaddr: the MAC address we are searching for
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure its a VF filter, else doesn't matter
  * @is_netdev: make sure its a netdev filter, else doesn't matter
  *
  * Returns the first filter with the provided MAC address or NULL if
@@ -1209,7 +1221,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
  * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
  * @vsi: the VSI to be searched
  * @macaddr: the mac address to be filtered
- * @is_vf: true if it is a vf
+ * @is_vf: true if it is a VF
  * @is_netdev: true if it is a netdev
  *
  * Goes through all the macvlan filters and adds a
@@ -1270,7 +1282,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
  * @vsi: the VSI to be searched
  * @macaddr: the MAC address
  * @vlan: the vlan
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure its a VF filter, else doesn't matter
  * @is_netdev: make sure its a netdev filter, else doesn't matter
  *
  * Returns ptr to the filter object or NULL when no memory available.
@@ -1330,7 +1342,7 @@ add_filter_out:
  * @vsi: the VSI to be searched
  * @macaddr: the MAC address
  * @vlan: the vlan
- * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_vf: make sure it's a VF filter, else doesn't matter
  * @is_netdev: make sure it's a netdev filter, else doesn't matter
  **/
 void i40e_del_filter(struct i40e_vsi *vsi,
@@ -1357,7 +1369,7 @@ void i40e_del_filter(struct i40e_vsi *vsi,
                        f->counter--;
                }
        } else {
-               /* make sure we don't remove a filter in use by vf or netdev */
+               /* make sure we don't remove a filter in use by VF or netdev */
                int min_f = 0;
                min_f += (f->is_vf ? 1 : 0);
                min_f += (f->is_netdev ? 1 : 0);
@@ -1546,7 +1558,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                        vsi->tc_config.tc_info[i].qoffset = offset;
                        vsi->tc_config.tc_info[i].qcount = qcount;
 
-                       /* find the power-of-2 of the number of queue pairs */
+                       /* find the next higher power-of-2 of num queue pairs */
                        num_qps = qcount;
                        pow = 0;
                        while (num_qps && ((1 << pow) < qcount)) {
@@ -1576,6 +1588,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 
        /* Set actual Tx/Rx queue pairs */
        vsi->num_queue_pairs = offset;
+       if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
+               if (vsi->req_queue_pairs > 0)
+                       vsi->num_queue_pairs = vsi->req_queue_pairs;
+               else
+                       vsi->num_queue_pairs = pf->num_lan_msix;
+       }
 
        /* Scheduler section valid can only be set for ADD VSI */
        if (is_add) {
@@ -1967,7 +1985,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
                                    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
 
        ctxt.seid = vsi->seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       ctxt.info = vsi->info;
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
@@ -1996,7 +2014,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
                                    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
 
        ctxt.seid = vsi->seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       ctxt.info = vsi->info;
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
@@ -2280,7 +2298,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
                                    I40E_AQ_VSI_PVLAN_EMOD_STR;
 
        ctxt.seid = vsi->seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       ctxt.info = vsi->info;
        aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (aq_ret) {
                dev_info(&vsi->back->pdev->dev,
@@ -2398,20 +2416,20 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
        struct i40e_vsi *vsi = ring->vsi;
        cpumask_var_t mask;
 
-       if (ring->q_vector && ring->netdev) {
-               /* Single TC mode enable XPS */
-               if (vsi->tc_config.numtc <= 1 &&
-                   !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
+       if (!ring->q_vector || !ring->netdev)
+               return;
+
+       /* Single TC mode enable XPS */
+       if (vsi->tc_config.numtc <= 1) {
+               if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
                        netif_set_xps_queue(ring->netdev,
                                            &ring->q_vector->affinity_mask,
                                            ring->queue_index);
-               } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
-                       /* Disable XPS to allow selection based on TC */
-                       bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
-                       netif_set_xps_queue(ring->netdev, mask,
-                                           ring->queue_index);
-                       free_cpumask_var(mask);
-               }
+       } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+               /* Disable XPS to allow selection based on TC */
+               bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
+               netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
+               free_cpumask_var(mask);
        }
 }
 
@@ -2596,7 +2614,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
        writel(0, ring->tail);
 
-       i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+       if (ring_is_ps_enabled(ring)) {
+               i40e_alloc_rx_headers(ring);
+               i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
+       } else {
+               i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
+       }
 
        return 0;
 }
@@ -3183,13 +3206,16 @@ static irqreturn_t i40e_intr(int irq, void *data)
                        pf->globr_count++;
                } else if (val == I40E_RESET_EMPR) {
                        pf->empr_count++;
-                       set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+                       set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
                }
        }
 
        if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
                icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
                dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+               dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
+                        rd32(hw, I40E_PFHMC_ERRORINFO),
+                        rd32(hw, I40E_PFHMC_ERRORDATA));
        }
 
        if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
@@ -3825,6 +3851,8 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
                pci_disable_msix(pf->pdev);
                kfree(pf->msix_entries);
                pf->msix_entries = NULL;
+               kfree(pf->irq_pile);
+               pf->irq_pile = NULL;
        } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
                pci_disable_msi(pf->pdev);
        }
@@ -4021,7 +4049,7 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
 #endif
 /**
  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
- * @pf: pointer to pf
+ * @pf: pointer to PF
  *
  * Get TC map for ISCSI PF type that will include iSCSI TC
  * and LAN TC.
@@ -4119,7 +4147,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
        if (pf->hw.func_caps.iscsi)
                enabled_tc =  i40e_get_iscsi_tc_map(pf);
        else
-               enabled_tc = pf->hw.func_caps.enabled_tcmap;
+               return 1; /* Only TC0 */
 
        /* At least have TC0 */
        enabled_tc = (enabled_tc ? enabled_tc : 0x1);
@@ -4169,11 +4197,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
        if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
                return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
 
-       /* MPF enabled and iSCSI PF type */
+       /* MFP enabled and iSCSI PF type */
        if (pf->hw.func_caps.iscsi)
                return i40e_get_iscsi_tc_map(pf);
        else
-               return pf->hw.func_caps.enabled_tcmap;
+               return i40e_pf_get_default_tc(pf);
 }
 
 /**
@@ -4196,7 +4224,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
        aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
        if (aq_ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't get pf vsi bw config, err %d, aq_err %d\n",
+                        "couldn't get PF vsi bw config, err %d, aq_err %d\n",
                         aq_ret, pf->hw.aq.asq_last_status);
                return -EINVAL;
        }
@@ -4206,7 +4234,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
                                                  NULL);
        if (aq_ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
+                        "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
                         aq_ret, pf->hw.aq.asq_last_status);
                return -EINVAL;
        }
@@ -4383,7 +4411,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
        ctxt.pf_num = vsi->back->hw.pf_id;
        ctxt.vf_num = 0;
        ctxt.uplink_seid = vsi->uplink_seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       ctxt.info = vsi->info;
        i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
 
        /* Update the VSI after updating the VSI queue-mapping information */
@@ -4563,6 +4591,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
        struct i40e_hw *hw = &pf->hw;
        int err = 0;
 
+       /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
+       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4))
+               goto out;
+
        /* Get the initial DCB configuration */
        err = i40e_init_dcb(hw);
        if (!err) {
@@ -4626,6 +4659,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
        case I40E_LINK_SPEED_40GB:
                strlcpy(speed, "40 Gbps", SPEED_SIZE);
                break;
+       case I40E_LINK_SPEED_20GB:
+               strncpy(speed, "20 Gbps", SPEED_SIZE);
+               break;
        case I40E_LINK_SPEED_10GB:
                strlcpy(speed, "10 Gbps", SPEED_SIZE);
                break;
@@ -4853,11 +4889,7 @@ exit:
  *
  * Returns 0 on success, negative value on failure
  **/
-#ifdef I40E_FCOE
 int i40e_open(struct net_device *netdev)
-#else
-static int i40e_open(struct net_device *netdev)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -4967,7 +4999,7 @@ err_setup_tx:
 
 /**
  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
- * @pf: Pointer to pf
+ * @pf: Pointer to PF
  *
  * This function destroys the hlist where all the Flow Director
  * filters were saved.
@@ -5055,24 +5087,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
                i40e_flush(&pf->hw);
 
-       } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
-
-               /* Request a Firmware Reset
-                *
-                * Same as Global reset, plus restarting the
-                * embedded firmware engine.
-                */
-               /* enable EMP Reset */
-               val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
-               val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
-               wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
-
-               /* force the reset */
-               val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
-               val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
-               wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
-               i40e_flush(&pf->hw);
-
        } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
 
                /* Request a PF Reset
@@ -5195,7 +5209,6 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        struct i40e_aqc_lldp_get_mib *mib =
                (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
        struct i40e_hw *hw = &pf->hw;
-       struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
        struct i40e_dcbx_config tmp_dcbx_cfg;
        bool need_reconfig = false;
        int ret = 0;
@@ -5226,10 +5239,11 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
                goto exit;
        }
 
-       memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
        /* Store the old configuration */
-       tmp_dcbx_cfg = *dcbx_cfg;
+       tmp_dcbx_cfg = hw->local_dcbx_config;
 
+       /* Reset the old DCBx configuration data */
+       memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
        /* Get updated DCBX data from firmware */
        ret = i40e_get_dcb_config(&pf->hw);
        if (ret) {
@@ -5238,20 +5252,22 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        }
 
        /* No change detected in DCBX configs */
-       if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+       if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
+                   sizeof(tmp_dcbx_cfg))) {
                dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
                goto exit;
        }
 
-       need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);
+       need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
+                                              &hw->local_dcbx_config);
 
-       i40e_dcbnl_flush_apps(pf, dcbx_cfg);
+       i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
 
        if (!need_reconfig)
                goto exit;
 
        /* Enable DCB tagging only when more than one TC */
-       if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+       if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
                pf->flags |= I40E_FLAG_DCB_ENABLED;
        else
                pf->flags &= ~I40E_FLAG_DCB_ENABLED;
@@ -5351,9 +5367,9 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
  * @pf: board private structure
  **/
-int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
 {
-       int val, fcnt_prog;
+       u32 val, fcnt_prog;
 
        val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
        fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
@@ -5361,12 +5377,13 @@ int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
 }
 
 /**
- * i40e_get_current_fd_count - Get the count of total FD filters programmed
+ * i40e_get_current_fd_count - Get total FD filters programmed for this PF
  * @pf: board private structure
  **/
-int i40e_get_current_fd_count(struct i40e_pf *pf)
+u32 i40e_get_current_fd_count(struct i40e_pf *pf)
 {
-       int val, fcnt_prog;
+       u32 val, fcnt_prog;
+
        val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
        fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
                    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
@@ -5374,6 +5391,21 @@ int i40e_get_current_fd_count(struct i40e_pf *pf)
        return fcnt_prog;
 }
 
+/**
+ * i40e_get_global_fd_count - Get total FD filters programmed on device
+ * @pf: board private structure
+ **/
+u32 i40e_get_global_fd_count(struct i40e_pf *pf)
+{
+       u32 val, fcnt_prog;
+
+       val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
+       fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
+                   ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
+                    I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+       return fcnt_prog;
+}
+
 /**
  * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
  * @pf: board private structure
@@ -5388,7 +5420,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
        /* Check if, FD SB or ATR was auto disabled and if there is enough room
         * to re-enable
         */
-       fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+       fcnt_prog = i40e_get_global_fd_count(pf);
        fcnt_avail = pf->fdir_pf_filter_count;
        if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
            (pf->fd_add_err == 0) ||
@@ -5410,13 +5442,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 }
 
 #define I40E_MIN_FD_FLUSH_INTERVAL 10
+#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
 /**
  * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
  * @pf: board private structure
  **/
 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
 {
+       unsigned long min_flush_time;
        int flush_wait_retry = 50;
+       bool disable_atr = false;
+       int fd_room;
        int reg;
 
        if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
@@ -5424,9 +5460,20 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
 
        if (time_after(jiffies, pf->fd_flush_timestamp +
                                (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
-               set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+               /* If the flush is happening too quick and we have mostly
+                * SB rules we should not re-enable ATR for some time.
+                */
+               min_flush_time = pf->fd_flush_timestamp
+                               + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+               fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+
+               if (!(time_after(jiffies, min_flush_time)) &&
+                   (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+                       dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+                       disable_atr = true;
+               }
+
                pf->fd_flush_timestamp = jiffies;
-               pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
                pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                /* flush all filters */
                wr32(&pf->hw, I40E_PFQF_CTL_1,
@@ -5446,10 +5493,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
                } else {
                        /* replay sideband filters */
                        i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-
-                       pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                       if (!disable_atr)
+                               pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                        clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
                        dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
                }
@@ -5460,7 +5505,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
  * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
  * @pf: board private structure
  **/
-int i40e_get_current_atr_cnt(struct i40e_pf *pf)
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
 {
        return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
 }
@@ -5486,9 +5531,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
        if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
                return;
 
-       if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
-           (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
-           (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
+       if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
                i40e_fdir_flush_and_replay(pf);
 
        i40e_fdir_check_and_reenable(pf);
@@ -5757,11 +5800,9 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
        struct i40e_hw *hw = &pf->hw;
        struct i40e_aqc_get_link_status *status =
                (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
-       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
 
        /* save off old link status information */
-       memcpy(&pf->hw.phy.link_info_old, hw_link_info,
-              sizeof(pf->hw.phy.link_info_old));
+       hw->phy.link_info_old = hw->phy.link_info;
 
        /* Do a new status request to re-enable LSE reporting
         * and load new status information into the hw struct
@@ -5875,6 +5916,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
                case i40e_aqc_opc_send_msg_to_peer:
                        dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
                        break;
+               case i40e_aqc_opc_nvm_erase:
+               case i40e_aqc_opc_nvm_update:
+                       i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
+                       break;
                default:
                        dev_info(&pf->pdev->dev,
                                 "ARQ Error: Unknown event 0x%04x received\n",
@@ -5918,6 +5963,94 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
        }
 }
 
+/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * enable switch loop back or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       struct i40e_vsi_context ctxt;
+       int aq_ret;
+
+       ctxt.seid = pf->main_vsi_seid;
+       ctxt.pf_num = pf->hw.pf_id;
+       ctxt.vf_num = 0;
+       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "%s couldn't get PF vsi config, err %d, aq_err %d\n",
+                        __func__, aq_ret, pf->hw.aq.asq_last_status);
+               return;
+       }
+       ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+       ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+       ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "%s: update vsi switch failed, aq_err=%d\n",
+                        __func__, vsi->back->hw.aq.asq_last_status);
+       }
+}
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * disable switch loop back or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       struct i40e_vsi_context ctxt;
+       int aq_ret;
+
+       ctxt.seid = pf->main_vsi_seid;
+       ctxt.pf_num = pf->hw.pf_id;
+       ctxt.vf_num = 0;
+       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "%s couldn't get PF vsi config, err %d, aq_err %d\n",
+                        __func__, aq_ret, pf->hw.aq.asq_last_status);
+               return;
+       }
+       ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+       ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+       ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "%s: update vsi switch failed, aq_err=%d\n",
+                        __func__, vsi->back->hw.aq.asq_last_status);
+       }
+}
+
+/**
+ * i40e_config_bridge_mode - Configure the HW bridge mode
+ * @veb: pointer to the bridge instance
+ *
+ * Configure the loop back mode for the LAN VSI that is downlink to the
+ * specified HW bridge instance. It is expected this function is called
+ * when a new HW bridge is instantiated.
+ **/
+static void i40e_config_bridge_mode(struct i40e_veb *veb)
+{
+       struct i40e_pf *pf = veb->pf;
+
+       dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+                veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+       if (veb->bridge_mode & BRIDGE_MODE_VEPA)
+               i40e_disable_pf_switch_lb(pf);
+       else
+               i40e_enable_pf_switch_lb(pf);
+}
+
 /**
  * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
  * @veb: pointer to the VEB instance
@@ -5964,8 +6097,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
        if (ret)
                goto end_reconstitute;
 
-       /* Enable LB mode for the main VSI now that it is on a VEB */
-       i40e_enable_pf_switch_lb(pf);
+       i40e_config_bridge_mode(veb);
 
        /* create the remaining VSIs attached to this VEB */
        for (v = 0; v < pf->num_alloc_vsi; v++) {
@@ -6137,7 +6269,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
  * i40e_prep_for_reset - prep for the core to reset
  * @pf: board private structure
  *
- * Close up the VFs and other things in prep for pf Reset.
+ * Close up the VFs and other things in prep for PF Reset.
   **/
 static void i40e_prep_for_reset(struct i40e_pf *pf)
 {
@@ -6222,10 +6354,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        }
 
        /* re-verify the eeprom if we just had an EMP reset */
-       if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
-               clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+       if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
                i40e_verify_eeprom(pf);
-       }
 
        i40e_clear_pxe_mode(hw);
        ret = i40e_get_capabilities(pf);
@@ -6335,13 +6465,14 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                }
        }
 
-       msleep(75);
-       ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
-       if (ret) {
-               dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-                        pf->hw.aq.asq_last_status);
+       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4)) {
+               msleep(75);
+               ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+               if (ret)
+                       dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+                                pf->hw.aq.asq_last_status);
        }
-
        /* reinit the misc interrupt */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                ret = i40e_setup_misc_vector(pf);
@@ -6364,7 +6495,7 @@ clear_recovery:
 }
 
 /**
- * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
+ * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
  * @pf: board private structure
  *
  * Close up the VFs and other things in prep for a Core Reset,
@@ -6378,7 +6509,7 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
 
 /**
  * i40e_handle_mdd_event
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
  *
  * Called from the MDD irq handler to identify possibly malicious vfs
  **/
@@ -6407,7 +6538,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
                                I40E_GL_MDET_TX_QUEUE_SHIFT) -
                                pf->hw.func_caps.base_queue;
                if (netif_msg_tx_err(pf))
-                       dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
+                       dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
                                 event, queue, pf_num, vf_num);
                wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
                mdd_detected = true;
@@ -6493,7 +6624,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret;
-       u8 filter_index;
        __be16 port;
        int i;
 
@@ -6506,22 +6636,20 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
                if (pf->pending_vxlan_bitmap & (1 << i)) {
                        pf->pending_vxlan_bitmap &= ~(1 << i);
                        port = pf->vxlan_ports[i];
-                       ret = port ?
-                             i40e_aq_add_udp_tunnel(hw, ntohs(port),
+                       if (port)
+                               ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
                                                     I40E_AQC_TUNNEL_TYPE_VXLAN,
-                                                    &filter_index, NULL)
-                             : i40e_aq_del_udp_tunnel(hw, i, NULL);
+                                                    NULL, NULL);
+                       else
+                               ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
 
                        if (ret) {
-                               dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
-                                        port ? "adding" : "deleting",
-                                        ntohs(port), port ? i : i);
-
+                               dev_info(&pf->pdev->dev,
+                                        "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+                                        port ? "add" : "delete",
+                                        ntohs(port), i, ret,
+                                        pf->hw.aq.asq_last_status);
                                pf->vxlan_ports[i] = 0;
-                       } else {
-                               dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
-                                        port ? "Added" : "Deleted",
-                                        ntohs(port), port ? i : filter_index);
                        }
                }
        }
@@ -6728,6 +6856,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        vsi->idx = vsi_idx;
        vsi->rx_itr_setting = pf->rx_itr_default;
        vsi->tx_itr_setting = pf->tx_itr_default;
+       vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
+                               pf->rss_table_size : 64;
        vsi->netdev_registered = false;
        vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
        INIT_LIST_HEAD(&vsi->mac_filter_list);
@@ -6808,7 +6938,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
                goto unlock_vsi;
        }
 
-       /* updates the pf for this cleared vsi */
+       /* updates the PF for this cleared vsi */
        i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
        i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
@@ -6921,15 +7051,14 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
  *
  * Work with the OS to set up the MSIX vectors needed.
  *
- * Returns 0 on success, negative on failure
+ * Returns the number of vectors reserved or negative on failure
  **/
 static int i40e_init_msix(struct i40e_pf *pf)
 {
-       i40e_status err = 0;
        struct i40e_hw *hw = &pf->hw;
-       int other_vecs = 0;
+       int vectors_left;
        int v_budget, i;
-       int vec;
+       int v_actual;
 
        if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
                return -ENODEV;
@@ -6951,24 +7080,62 @@ static int i40e_init_msix(struct i40e_pf *pf)
         * If we can't get what we want, we'll simplify to nearly nothing
         * and try again.  If that still fails, we punt.
         */
-       pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
-       pf->num_vmdq_msix = pf->num_vmdq_qps;
-       other_vecs = 1;
-       other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
-       if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
-               other_vecs++;
-
-       /* Scale down if necessary, and the rings will share vectors */
-       pf->num_lan_msix = min_t(int, pf->num_lan_msix,
-                       (hw->func_caps.num_msix_vectors - other_vecs));
-       v_budget = pf->num_lan_msix + other_vecs;
+       vectors_left = hw->func_caps.num_msix_vectors;
+       v_budget = 0;
+
+       /* reserve one vector for miscellaneous handler */
+       if (vectors_left) {
+               v_budget++;
+               vectors_left--;
+       }
+
+       /* reserve vectors for the main PF traffic queues */
+       pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+       vectors_left -= pf->num_lan_msix;
+       v_budget += pf->num_lan_msix;
+
+       /* reserve one vector for sideband flow director */
+       if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+               if (vectors_left) {
+                       v_budget++;
+                       vectors_left--;
+               } else {
+                       pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+               }
+       }
 
 #ifdef I40E_FCOE
+       /* can we reserve enough for FCoE? */
        if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
-               pf->num_fcoe_msix = pf->num_fcoe_qps;
+               if (!vectors_left)
+                       pf->num_fcoe_msix = 0;
+               else if (vectors_left >= pf->num_fcoe_qps)
+                       pf->num_fcoe_msix = pf->num_fcoe_qps;
+               else
+                       pf->num_fcoe_msix = 1;
                v_budget += pf->num_fcoe_msix;
+               vectors_left -= pf->num_fcoe_msix;
        }
+
 #endif
+       /* any vectors left over go for VMDq support */
+       if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
+               int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
+               int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
+
+               /* if we're short on vectors for what's desired, we limit
+                * the queues per vmdq.  If this is still more than are
+                * available, the user will need to change the number of
+                * queues/vectors used by the PF later with the ethtool
+                * channels command
+                */
+               if (vmdq_vecs < vmdq_vecs_wanted)
+                       pf->num_vmdq_qps = 1;
+               pf->num_vmdq_msix = pf->num_vmdq_qps;
+
+               v_budget += vmdq_vecs;
+               vectors_left -= vmdq_vecs;
+       }
 
        pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
                                   GFP_KERNEL);
@@ -6977,9 +7144,9 @@ static int i40e_init_msix(struct i40e_pf *pf)
 
        for (i = 0; i < v_budget; i++)
                pf->msix_entries[i].entry = i;
-       vec = i40e_reserve_msix_vectors(pf, v_budget);
+       v_actual = i40e_reserve_msix_vectors(pf, v_budget);
 
-       if (vec != v_budget) {
+       if (v_actual != v_budget) {
                /* If we have limited resources, we will start with no vectors
                 * for the special features and then allocate vectors to some
                 * of these features based on the policy and at the end disable
@@ -6992,26 +7159,30 @@ static int i40e_init_msix(struct i40e_pf *pf)
                pf->num_vmdq_msix = 0;
        }
 
-       if (vec < I40E_MIN_MSIX) {
+       if (v_actual < I40E_MIN_MSIX) {
                pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
                kfree(pf->msix_entries);
                pf->msix_entries = NULL;
                return -ENODEV;
 
-       } else if (vec == I40E_MIN_MSIX) {
+       } else if (v_actual == I40E_MIN_MSIX) {
                /* Adjust for minimal MSIX use */
                pf->num_vmdq_vsis = 0;
                pf->num_vmdq_qps = 0;
                pf->num_lan_qps = 1;
                pf->num_lan_msix = 1;
 
-       } else if (vec != v_budget) {
+       } else if (v_actual != v_budget) {
+               int vec;
+
                /* reserve the misc vector */
-               vec--;
+               vec = v_actual - 1;
 
                /* Scale vector usage down */
                pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
                pf->num_vmdq_vsis = 1;
+               pf->num_vmdq_qps = 1;
+               pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
 
                /* partition out the remaining vectors */
                switch (vec) {
@@ -7037,10 +7208,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
                                vec--;
                        }
 #endif
-                       pf->num_lan_msix = min_t(int, (vec / 2),
-                                                pf->num_lan_qps);
-                       pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
-                                                 I40E_DEFAULT_NUM_VMDQ_VSI);
+                       /* give the rest to the PF */
+                       pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
                        break;
                }
        }
@@ -7057,7 +7226,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
                pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
        }
 #endif
-       return err;
+       return v_actual;
 }
 
 /**
@@ -7134,11 +7303,12 @@ err_out:
  **/
 static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 {
-       int err = 0;
+       int vectors = 0;
+       ssize_t size;
 
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
-               err = i40e_init_msix(pf);
-               if (err) {
+               vectors = i40e_init_msix(pf);
+               if (vectors < 0) {
                        pf->flags &= ~(I40E_FLAG_MSIX_ENABLED   |
 #ifdef I40E_FCOE
                                       I40E_FLAG_FCOE_ENABLED   |
@@ -7158,18 +7328,26 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
        if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
            (pf->flags & I40E_FLAG_MSI_ENABLED)) {
                dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
-               err = pci_enable_msi(pf->pdev);
-               if (err) {
-                       dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
+               vectors = pci_enable_msi(pf->pdev);
+               if (vectors < 0) {
+                       dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
+                                vectors);
                        pf->flags &= ~I40E_FLAG_MSI_ENABLED;
                }
+               vectors = 1;  /* one MSI or Legacy vector */
        }
 
        if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
                dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
 
+       /* set up vector assignment tracking */
+       size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
+       pf->irq_pile = kzalloc(size, GFP_KERNEL);
+       pf->irq_pile->num_entries = vectors;
+       pf->irq_pile->search_hint = 0;
+
        /* track first vector for misc interrupts */
-       err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
+       (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
 }
 
 /**
@@ -7219,6 +7397,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
 static int i40e_config_rss(struct i40e_pf *pf)
 {
        u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        struct i40e_hw *hw = &pf->hw;
        u32 lut = 0;
        int i, j;
@@ -7236,15 +7415,14 @@ static int i40e_config_rss(struct i40e_pf *pf)
        wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
        wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
+       vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
        /* Check capability and Set table size and register per hw expectation*/
        reg_val = rd32(hw, I40E_PFQF_CTL_0);
-       if (hw->func_caps.rss_table_size == 512) {
+       if (pf->rss_table_size == 512)
                reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
-               pf->rss_table_size = 512;
-       } else {
-               pf->rss_table_size = 128;
+       else
                reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
-       }
        wr32(hw, I40E_PFQF_CTL_0, reg_val);
 
        /* Populate the LUT with max no. of queues in round robin fashion */
@@ -7257,7 +7435,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
                 * If LAN VSI is the only consumer for RSS then this requirement
                 * is not necessary.
                 */
-               if (j == pf->rss_size)
+               if (j == vsi->rss_size)
                        j = 0;
                /* lut = 4-byte sliding window of 4 lut entries */
                lut = (lut << 8) | (j &
@@ -7281,15 +7459,19 @@ static int i40e_config_rss(struct i40e_pf *pf)
  **/
 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
 {
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       int new_rss_size;
+
        if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
                return 0;
 
-       queue_count = min_t(int, queue_count, pf->rss_size_max);
+       new_rss_size = min_t(int, queue_count, pf->rss_size_max);
 
-       if (queue_count != pf->rss_size) {
+       if (queue_count != vsi->num_queue_pairs) {
+               vsi->req_queue_pairs = queue_count;
                i40e_prep_for_reset(pf);
 
-               pf->rss_size = queue_count;
+               pf->rss_size = new_rss_size;
 
                i40e_reset_and_rebuild(pf, true);
                i40e_config_rss(pf);
@@ -7298,6 +7480,128 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
        return pf->rss_size;
 }
 
+/**
+ * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
+{
+       i40e_status status;
+       bool min_valid, max_valid;
+       u32 max_bw, min_bw;
+
+       status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
+                                          &min_valid, &max_valid);
+
+       if (!status) {
+               if (min_valid)
+                       pf->npar_min_bw = min_bw;
+               if (max_valid)
+                       pf->npar_max_bw = max_bw;
+       }
+
+       return status;
+}
+
+/**
+ * i40e_set_npar_bw_setting - Set BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
+{
+       struct i40e_aqc_configure_partition_bw_data bw_data;
+       i40e_status status;
+
+       /* Set the valid bit for this PF */
+       bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+       bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
+       bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
+
+       /* Set the new bandwidths */
+       status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
+
+       return status;
+}
+
+/**
+ * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
+{
+       /* Commit temporary BW setting to permanent NVM image */
+       enum i40e_admin_queue_err last_aq_status;
+       i40e_status ret;
+       u16 nvm_word;
+
+       if (pf->hw.partition_id != 1) {
+               dev_info(&pf->pdev->dev,
+                        "Commit BW only works on partition 1! This is partition %d",
+                        pf->hw.partition_id);
+               ret = I40E_NOT_SUPPORTED;
+               goto bw_commit_out;
+       }
+
+       /* Acquire NVM for read access */
+       ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+       last_aq_status = pf->hw.aq.asq_last_status;
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "Cannot acquire NVM for read access, err %d: aq_err %d\n",
+                        ret, last_aq_status);
+               goto bw_commit_out;
+       }
+
+       /* Read word 0x10 of NVM - SW compatibility word 1 */
+       ret = i40e_aq_read_nvm(&pf->hw,
+                              I40E_SR_NVM_CONTROL_WORD,
+                              0x10, sizeof(nvm_word), &nvm_word,
+                              false, NULL);
+       /* Save off last admin queue command status before releasing
+        * the NVM
+        */
+       last_aq_status = pf->hw.aq.asq_last_status;
+       i40e_release_nvm(&pf->hw);
+       if (ret) {
+               dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
+                        ret, last_aq_status);
+               goto bw_commit_out;
+       }
+
+       /* Wait a bit for NVM release to complete */
+       msleep(50);
+
+       /* Acquire NVM for write access */
+       ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
+       last_aq_status = pf->hw.aq.asq_last_status;
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "Cannot acquire NVM for write access, err %d: aq_err %d\n",
+                        ret, last_aq_status);
+               goto bw_commit_out;
+       }
+       /* Write it back out unchanged to initiate update NVM,
+        * which will force a write of the shadow (alt) RAM to
+        * the NVM - thus storing the bandwidth values permanently.
+        */
+       ret = i40e_aq_update_nvm(&pf->hw,
+                                I40E_SR_NVM_CONTROL_WORD,
+                                0x10, sizeof(nvm_word),
+                                &nvm_word, true, NULL);
+       /* Save off last admin queue command status before releasing
+        * the NVM
+        */
+       last_aq_status = pf->hw.aq.asq_last_status;
+       i40e_release_nvm(&pf->hw);
+       if (ret)
+               dev_info(&pf->pdev->dev,
+                        "BW settings NOT SAVED, err %d aq_err %d\n",
+                        ret, last_aq_status);
+bw_commit_out:
+
+       return ret;
+}
+
 /**
  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
  * @pf: board private structure to initialize
@@ -7324,8 +7628,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
        /* Set default capability flags */
        pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
                    I40E_FLAG_MSI_ENABLED     |
-                   I40E_FLAG_MSIX_ENABLED    |
-                   I40E_FLAG_RX_1BUF_ENABLED;
+                   I40E_FLAG_MSIX_ENABLED;
+
+       if (iommu_present(&pci_bus_type))
+               pf->flags |= I40E_FLAG_RX_PS_ENABLED;
+       else
+               pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
 
        /* Set default ITR */
        pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
@@ -7336,6 +7644,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
         */
        pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
        pf->rss_size = 1;
+       pf->rss_table_size = pf->hw.func_caps.rss_table_size;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
                                 pf->hw.func_caps.num_tx_qp);
        if (pf->hw.func_caps.rss) {
@@ -7347,6 +7656,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
        if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
                pf->flags |= I40E_FLAG_MFP_ENABLED;
                dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+               if (i40e_get_npar_bw_setting(pf))
+                       dev_warn(&pf->pdev->dev,
+                                "Could not get NPAR bw settings\n");
+               else
+                       dev_info(&pf->pdev->dev,
+                                "Min BW = %8.8x, Max BW = %8.8x\n",
+                                pf->npar_min_bw, pf->npar_max_bw);
        }
 
        /* FW/NVM is not yet fixed in this regard */
@@ -7354,11 +7670,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
            (pf->hw.func_caps.fd_filters_best_effort > 0)) {
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-               /* Setup a counter for fd_atr per pf */
+               /* Setup a counter for fd_atr per PF */
                pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
                if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
                        pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-                       /* Setup a counter for fd_sb per pf */
+                       /* Setup a counter for fd_sb per PF */
                        pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
                } else {
                        dev_info(&pf->pdev->dev,
@@ -7406,22 +7722,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
        pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
        pf->qp_pile->search_hint = 0;
 
-       /* set up vector assignment tracking */
-       size = sizeof(struct i40e_lump_tracking)
-               + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
-       pf->irq_pile = kzalloc(size, GFP_KERNEL);
-       if (!pf->irq_pile) {
-               kfree(pf->qp_pile);
-               err = -ENOMEM;
-               goto sw_init_done;
-       }
-       pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
-       pf->irq_pile->search_hint = 0;
-
        pf->tx_timeout_recovery_level = 1;
 
        mutex_init(&pf->switch_mutex);
 
+       /* If NPAR is enabled nudge the Tx scheduler */
+       if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
+               i40e_set_npar_bw_setting(pf);
+
 sw_init_done:
        return err;
 }
@@ -7534,7 +7842,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
 
        /* Check if port already exists */
        if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
-               netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+               netdev_info(netdev, "vxlan port %d already offloaded\n",
+                           ntohs(port));
                return;
        }
 
@@ -7542,7 +7851,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
        next_idx = i40e_get_vxlan_port_idx(pf, 0);
 
        if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
-               netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+               netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
                            ntohs(port));
                return;
        }
@@ -7550,8 +7859,9 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
        /* New port: add it and mark its index in the bitmap */
        pf->vxlan_ports[next_idx] = port;
        pf->pending_vxlan_bitmap |= (1 << next_idx);
-
        pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+
+       dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
 }
 
 /**
@@ -7579,12 +7889,13 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
                 * and make it pending
                 */
                pf->vxlan_ports[idx] = 0;
-
                pf->pending_vxlan_bitmap |= (1 << idx);
-
                pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+
+               dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
+                        ntohs(port));
        } else {
-               netdev_warn(netdev, "Port %d was not found, not deleting\n",
+               netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
                            ntohs(port));
        }
 }
@@ -7653,6 +7964,118 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        return err;
 }
 
+#ifdef HAVE_BRIDGE_ATTRIBS
+/**
+ * i40e_ndo_bridge_setlink - Set the hardware bridge mode
+ * @dev: the netdev being configured
+ * @nlh: RTNL message
+ *
+ * Inserts a new hardware bridge if not already created and
+ * enables the bridging mode requested (VEB or VEPA). If the
+ * hardware bridge has already been inserted and the request
+ * is to change the mode then that requires a PF reset to
+ * allow rebuild of the components with required hardware
+ * bridge mode enabled.
+ **/
+static int i40e_ndo_bridge_setlink(struct net_device *dev,
+                                  struct nlmsghdr *nlh)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_veb *veb = NULL;
+       struct nlattr *attr, *br_spec;
+       int i, rem;
+
+       /* Only for PF VSI for now */
+       if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+               return -EOPNOTSUPP;
+
+       /* Find the HW bridge for PF VSI */
+       for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+               if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+                       veb = pf->veb[i];
+       }
+
+       br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+       nla_for_each_nested(attr, br_spec, rem) {
+               __u16 mode;
+
+               if (nla_type(attr) != IFLA_BRIDGE_MODE)
+                       continue;
+
+               mode = nla_get_u16(attr);
+               if ((mode != BRIDGE_MODE_VEPA) &&
+                   (mode != BRIDGE_MODE_VEB))
+                       return -EINVAL;
+
+               /* Insert a new HW bridge */
+               if (!veb) {
+                       veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+                                            vsi->tc_config.enabled_tc);
+                       if (veb) {
+                               veb->bridge_mode = mode;
+                               i40e_config_bridge_mode(veb);
+                       } else {
+                               /* No Bridge HW offload available */
+                               return -ENOENT;
+                       }
+                       break;
+               } else if (mode != veb->bridge_mode) {
+                       /* Existing HW bridge but different mode needs reset */
+                       veb->bridge_mode = mode;
+                       i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_ndo_bridge_getlink - Get the hardware bridge mode
+ * @skb: skb buff
+ * @pid: process id
+ * @seq: RTNL message seq #
+ * @dev: the netdev being configured
+ * @filter_mask: unused
+ *
+ * Return the mode in which the hardware bridge is operating in
+ * i.e VEB or VEPA.
+ **/
+#ifdef HAVE_BRIDGE_FILTER
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                                  struct net_device *dev,
+                                  u32 __always_unused filter_mask)
+#else
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                                  struct net_device *dev)
+#endif /* HAVE_BRIDGE_FILTER */
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_veb *veb = NULL;
+       int i;
+
+       /* Only for PF VSI for now */
+       if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+               return -EOPNOTSUPP;
+
+       /* Find the HW bridge for the PF VSI */
+       for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+               if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+                       veb = pf->veb[i];
+       }
+
+       if (!veb)
+               return 0;
+
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
+}
+#endif /* HAVE_BRIDGE_ATTRIBS */
+
 static const struct net_device_ops i40e_netdev_ops = {
        .ndo_open               = i40e_open,
        .ndo_stop               = i40e_close,
@@ -7687,6 +8110,10 @@ static const struct net_device_ops i40e_netdev_ops = {
 #endif
        .ndo_get_phys_port_id   = i40e_get_phys_port_id,
        .ndo_fdb_add            = i40e_ndo_fdb_add,
+#ifdef HAVE_BRIDGE_ATTRIBS
+       .ndo_bridge_getlink     = i40e_ndo_bridge_getlink,
+       .ndo_bridge_setlink     = i40e_ndo_bridge_setlink,
+#endif /* HAVE_BRIDGE_ATTRIBS */
 };
 
 /**
@@ -7798,6 +8225,30 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
        i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
 }
 
+/**
+ * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
+ * @vsi: the VSI being queried
+ *
+ * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
+ **/
+int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
+{
+       struct i40e_veb *veb;
+       struct i40e_pf *pf = vsi->back;
+
+       /* Uplink is not a bridge so default to VEB */
+       if (vsi->veb_idx == I40E_NO_VEB)
+               return 1;
+
+       veb = pf->veb[vsi->veb_idx];
+       /* Uplink is a bridge in VEPA mode */
+       if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
+               return 0;
+
+       /* Uplink is a bridge in VEB mode */
+       return 1;
+}
+
 /**
  * i40e_add_vsi - Add a VSI to the switch
  * @vsi: the VSI being configured
@@ -7830,11 +8281,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ctxt.flags = I40E_AQ_VSI_TYPE_PF;
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "couldn't get pf vsi config, err %d, aq_err %d\n",
+                                "couldn't get PF vsi config, err %d, aq_err %d\n",
                                 ret, pf->hw.aq.asq_last_status);
                        return -ENOENT;
                }
-               memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+               vsi->info = ctxt.info;
                vsi->info.valid_sections = 0;
 
                vsi->seid = ctxt.seid;
@@ -7883,12 +8334,14 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ctxt.pf_num = hw->pf_id;
                ctxt.vf_num = 0;
                ctxt.uplink_seid = vsi->uplink_seid;
-               ctxt.connection_type = 0x1;     /* regular data port */
+               ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
                ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-               ctxt.info.valid_sections |=
+               if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+                       ctxt.info.valid_sections |=
                                cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-               ctxt.info.switch_id =
+                       ctxt.info.switch_id =
                                cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+               }
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
                break;
 
@@ -7896,16 +8349,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ctxt.pf_num = hw->pf_id;
                ctxt.vf_num = 0;
                ctxt.uplink_seid = vsi->uplink_seid;
-               ctxt.connection_type = 0x1;     /* regular data port */
+               ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
                ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
 
-               ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-
                /* This VSI is connected to VEB so the switch_id
                 * should be set to zero by default.
                 */
-               ctxt.info.switch_id = 0;
-               ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+               if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+                       ctxt.info.valid_sections |=
+                               cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+                       ctxt.info.switch_id =
+                               cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+               }
 
                /* Setup the VSI tx/rx queue map for TC0 only for now */
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
@@ -7915,15 +8370,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ctxt.pf_num = hw->pf_id;
                ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
                ctxt.uplink_seid = vsi->uplink_seid;
-               ctxt.connection_type = 0x1;     /* regular data port */
+               ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
                ctxt.flags = I40E_AQ_VSI_TYPE_VF;
 
-               ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-
                /* This VSI is connected to VEB so the switch_id
                 * should be set to zero by default.
                 */
-               ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+               if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+                       ctxt.info.valid_sections |=
+                               cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+                       ctxt.info.switch_id =
+                               cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+               }
 
                ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
                ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
@@ -7961,7 +8419,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                        ret = -ENOENT;
                        goto err;
                }
-               memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+               vsi->info = ctxt.info;
                vsi->info.valid_sections = 0;
                vsi->seid = ctxt.seid;
                vsi->id = ctxt.vsi_number;
@@ -8281,7 +8739,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                                         __func__);
                                return NULL;
                        }
-                       i40e_enable_pf_switch_lb(pf);
+                       i40e_config_bridge_mode(veb);
                }
                for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
                        if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
@@ -8724,7 +9182,7 @@ err_alloc:
 }
 
 /**
- * i40e_setup_pf_switch_element - set pf vars based on switch type
+ * i40e_setup_pf_switch_element - set PF vars based on switch type
  * @pf: board private structure
  * @ele: element we are building info from
  * @num_reported: total number of elements
@@ -8930,15 +9388,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
                i40e_config_rss(pf);
 
        /* fill in link information and enable LSE reporting */
-       i40e_update_link_info(&pf->hw, true);
-       i40e_link_event(pf);
-
-       /* Initialize user-specific link properties */
-       pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
-                                 I40E_AQ_AN_COMPLETED) ? true : false);
-
-       /* fill in link information and enable LSE reporting */
-       i40e_update_link_info(&pf->hw, true);
+       i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
        i40e_link_event(pf);
 
        /* Initialize user-specific link properties */
@@ -9008,7 +9458,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                        pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
                        dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
                }
-               pf->num_lan_qps = pf->rss_size_max;
+               pf->num_lan_qps = max_t(int, pf->rss_size_max,
+                                       num_online_cpus());
+               pf->num_lan_qps = min_t(int, pf->num_lan_qps,
+                                       pf->hw.func_caps.num_tx_qp);
+
                queues_left -= pf->num_lan_qps;
        }
 
@@ -9061,7 +9515,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
  * i40e_setup_pf_filter_control - Setup PF static filter control
  * @pf: PF to be setup
  *
- * i40e_setup_pf_filter_control sets up a pf's initial filter control
+ * i40e_setup_pf_filter_control sets up a PF's initial filter control
  * settings. If PE/FCoE are enabled then it will also set the per PF
  * based filter sizes required for them. It also enables Flow director,
  * ethertype and macvlan type filter settings for the pf.
@@ -9106,8 +9560,10 @@ static void i40e_print_features(struct i40e_pf *pf)
 #ifdef CONFIG_PCI_IOV
        buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
 #endif
-       buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
-                      pf->vsi[pf->lan_vsi]->num_queue_pairs);
+       buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
+                      pf->hw.func_caps.num_vsis,
+                      pf->vsi[pf->lan_vsi]->num_queue_pairs,
+                      pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
 
        if (pf->flags & I40E_FLAG_RSS_ENABLED)
                buf += sprintf(buf, "RSS ");
@@ -9136,14 +9592,16 @@ static void i40e_print_features(struct i40e_pf *pf)
  * @pdev: PCI device information struct
  * @ent: entry in i40e_pci_tbl
  *
- * i40e_probe initializes a pf identified by a pci_dev structure.
- * The OS initialization, configuring of the pf private structure,
+ * i40e_probe initializes a PF identified by a pci_dev structure.
+ * The OS initialization, configuring of the PF private structure,
  * and a hardware reset occur.
  *
  * Returns 0 on success, negative on failure
  **/
 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       unsigned long ioremap_len;
        struct i40e_pf *pf;
        struct i40e_hw *hw;
        static u16 pfs_found;
@@ -9195,8 +9653,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        hw = &pf->hw;
        hw->back = pf;
-       hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
-                             pci_resource_len(pdev, 0));
+
+       ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
+                           I40E_MAX_CSR_SPACE);
+
+       hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
        if (!hw->hw_addr) {
                err = -EIO;
                dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
@@ -9274,7 +9735,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_info(&pdev->dev,
                         "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
 
-
        i40e_verify_eeprom(pf);
 
        /* Rev 0 hardware was never productized */
@@ -9409,13 +9869,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
 
-       msleep(75);
-       err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
-       if (err) {
-               dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-                        pf->hw.aq.asq_last_status);
+       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4)) {
+               msleep(75);
+               err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+               if (err)
+                       dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+                                pf->hw.aq.asq_last_status);
        }
-
        /* The main driver is (mostly) up and happy. We need to set this state
         * before setting up the misc vector or we get a race and the vector
         * ends up disabled forever.
@@ -9499,6 +9960,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
        }
 
+       /* get the requested speeds from the fw */
+       err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
+       if (err)
+               dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
+                        err);
+       pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+
        /* print a string summarizing features */
        i40e_print_features(pf);
 
@@ -9517,7 +9985,6 @@ err_configure_lan_hmc:
        (void)i40e_shutdown_lan_hmc(hw);
 err_init_lan_hmc:
        kfree(pf->qp_pile);
-       kfree(pf->irq_pile);
 err_sw_init:
 err_adminq_setup:
        (void)i40e_shutdown_adminq(hw);
@@ -9617,7 +10084,6 @@ static void i40e_remove(struct pci_dev *pdev)
        }
 
        kfree(pf->qp_pile);
-       kfree(pf->irq_pile);
        kfree(pf->vsi);
 
        iounmap(pf->hw.hw_addr);
@@ -9760,6 +10226,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
        set_bit(__I40E_DOWN, &pf->state);
        del_timer_sync(&pf->service_timer);
        cancel_work_sync(&pf->service_task);
+       i40e_fdir_teardown(pf);
+
        rtnl_lock();
        i40e_prep_for_reset(pf);
        rtnl_unlock();
@@ -9844,6 +10312,7 @@ static int __init i40e_init_module(void)
        pr_info("%s: %s - version %s\n", i40e_driver_name,
                i40e_driver_string, i40e_driver_version_str);
        pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
+
        i40e_dbg_init();
        return pci_register_driver(&i40e_driver);
 }
index 5defe0d635141ed5c886cb1086995f2b2d3d31a1..e49acd2accd30917aff74dcb681fcede09949112 100644 (file)
@@ -164,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 }
 
 /**
- * i40e_read_nvm_word - Reads Shadow RAM
+ * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  * @data: word read from the Shadow RAM
  *
  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
  **/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
-                                        u16 *data)
+static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+                                           u16 *data)
 {
        i40e_status ret_code = I40E_ERR_TIMEOUT;
        u32 sr_reg;
@@ -212,7 +212,21 @@ read_nvm_exit:
 }
 
 /**
- * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+                              u16 *data)
+{
+       return i40e_read_nvm_word_srctl(hw, offset, data);
+}
+
+/**
+ * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  * @words: (in) number of words to read; (out) number of words actually read
@@ -222,8 +236,8 @@ read_nvm_exit:
  * method. The buffer read is preceded by the NVM ownership take
  * and followed by the release.
  **/
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
-                                          u16 *words, u16 *data)
+static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+                                             u16 *words, u16 *data)
 {
        i40e_status ret_code = 0;
        u16 index, word;
@@ -231,7 +245,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
        /* Loop thru the selected region */
        for (word = 0; word < *words; word++) {
                index = offset + word;
-               ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+               ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
                if (ret_code)
                        break;
        }
@@ -242,6 +256,23 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
        return ret_code;
 }
 
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+                                u16 *words, u16 *data)
+{
+       return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
+
 /**
  * i40e_write_nvm_aq - Writes Shadow RAM.
  * @hw: pointer to the HW structure.
@@ -302,11 +333,18 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
                                                    u16 *checksum)
 {
        i40e_status ret_code = 0;
+       struct i40e_virt_mem vmem;
        u16 pcie_alt_module = 0;
        u16 checksum_local = 0;
        u16 vpd_module = 0;
-       u16 word = 0;
-       u32 i = 0;
+       u16 *data;
+       u16 i = 0;
+
+       ret_code = i40e_allocate_virt_mem(hw, &vmem,
+                                   I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
+       if (ret_code)
+               goto i40e_calc_nvm_checksum_exit;
+       data = (u16 *)vmem.va;
 
        /* read pointer to VPD area */
        ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
@@ -317,7 +355,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 
        /* read pointer to PCIe Alt Auto-load module */
        ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
-                                      &pcie_alt_module);
+                                     &pcie_alt_module);
        if (ret_code) {
                ret_code = I40E_ERR_NVM_CHECKSUM;
                goto i40e_calc_nvm_checksum_exit;
@@ -327,33 +365,40 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
         * except the VPD and PCIe ALT Auto-load modules
         */
        for (i = 0; i < hw->nvm.sr_size; i++) {
+               /* Read SR page */
+               if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+                       u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
+                       ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
+                       if (ret_code) {
+                               ret_code = I40E_ERR_NVM_CHECKSUM;
+                               goto i40e_calc_nvm_checksum_exit;
+                       }
+               }
+
                /* Skip Checksum word */
                if (i == I40E_SR_SW_CHECKSUM_WORD)
-                       i++;
+                       continue;
                /* Skip VPD module (convert byte size to word count) */
-               if (i == (u32)vpd_module) {
-                       i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
-                       if (i >= hw->nvm.sr_size)
-                               break;
+               if ((i >= (u32)vpd_module) &&
+                   (i < ((u32)vpd_module +
+                    (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
+                       continue;
                }
                /* Skip PCIe ALT module (convert byte size to word count) */
-               if (i == (u32)pcie_alt_module) {
-                       i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
-                       if (i >= hw->nvm.sr_size)
-                               break;
+               if ((i >= (u32)pcie_alt_module) &&
+                   (i < ((u32)pcie_alt_module +
+                    (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
+                       continue;
                }
 
-               ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
-               if (ret_code) {
-                       ret_code = I40E_ERR_NVM_CHECKSUM;
-                       goto i40e_calc_nvm_checksum_exit;
-               }
-               checksum_local += word;
+               checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
        }
 
        *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
 
 i40e_calc_nvm_checksum_exit:
+       i40e_free_virt_mem(hw, &vmem);
        return ret_code;
 }
 
index 68e852a96680229818cb3f4d99dc4f69b529c33a..fea0d37ecc722af58d052476829c5f8ec53ef916 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -66,6 +66,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
 
 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
                                u16 *fw_major_version, u16 *fw_minor_version,
+                               u32 *fw_build,
                                u16 *api_major_version, u16 *api_minor_version,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
@@ -97,7 +98,6 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
                                bool enable_lse, struct i40e_link_status *link,
                                struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse);
 i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
                                u64 advt_reg,
                                struct i40e_asq_cmd_details *cmd_details);
@@ -247,6 +247,12 @@ void i40e_clear_hw(struct i40e_hw *hw);
 void i40e_clear_pxe_mode(struct i40e_hw *hw);
 bool i40e_get_link_status(struct i40e_hw *hw);
 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+                                     u32 *max_bw, u32 *min_bw, bool *min_valid,
+                                     bool *max_valid);
+i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+                       struct i40e_aqc_configure_partition_bw_data *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
 i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
                                 u32 pba_num_size);
@@ -260,8 +266,6 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw);
 i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
                                      enum i40e_aq_resource_access_type access);
 void i40e_release_nvm(struct i40e_hw *hw);
-i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
-                                        u16 *data);
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                                         u16 *data);
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
index fabcfa1b45b28817c0d2d3d1ed71c0c7e3054caf..a92b7725dec3910964e5807a88d4f31622b55763 100644 (file)
@@ -57,7 +57,7 @@
  * timespec. However, since the registers are 64 bits of nanoseconds, we must
  * convert the result to a timespec before we can return.
  **/
-static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts)
 {
        struct i40e_hw *hw = &pf->hw;
        u32 hi, lo;
@@ -69,7 +69,7 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
 
        ns = (((u64)hi) << 32) | lo;
 
-       *ts = ns_to_timespec(ns);
+       *ts = ns_to_timespec64(ns);
 }
 
 /**
@@ -81,10 +81,10 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
  * we receive a timespec from the stack, we must convert that timespec into
  * nanoseconds before programming the registers.
  **/
-static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts)
+static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts)
 {
        struct i40e_hw *hw = &pf->hw;
-       u64 ns = timespec_to_ns(ts);
+       u64 ns = timespec64_to_ns(ts);
 
        /* The timer will not update until the high register is written, so
         * write the low register first.
@@ -159,14 +159,14 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
-       struct timespec now, then = ns_to_timespec(delta);
+       struct timespec64 now, then = ns_to_timespec64(delta);
        unsigned long flags;
 
        spin_lock_irqsave(&pf->tmreg_lock, flags);
 
        i40e_ptp_read(pf, &now);
-       now = timespec_add(now, then);
-       i40e_ptp_write(pf, (const struct timespec *)&now);
+       now = timespec64_add(now, then);
+       i40e_ptp_write(pf, (const struct timespec64 *)&now);
 
        spin_unlock_irqrestore(&pf->tmreg_lock, flags);
 
@@ -181,7 +181,7 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  * Read the device clock and return the correct value on ns, after converting it
  * into a timespec struct.
  **/
-static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
        unsigned long flags;
@@ -202,7 +202,7 @@ static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
  * to ns happens in the write function.
  **/
 static int i40e_ptp_settime(struct ptp_clock_info *ptp,
-                           const struct timespec *ts)
+                           const struct timespec64 *ts)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
        unsigned long flags;
@@ -613,8 +613,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
        pf->ptp_caps.pps = 0;
        pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
        pf->ptp_caps.adjtime = i40e_ptp_adjtime;
-       pf->ptp_caps.gettime = i40e_ptp_gettime;
-       pf->ptp_caps.settime = i40e_ptp_settime;
+       pf->ptp_caps.gettime64 = i40e_ptp_gettime;
+       pf->ptp_caps.settime64 = i40e_ptp_settime;
        pf->ptp_caps.enable = i40e_ptp_feature_enable;
 
        /* Attempt to register the clock before enabling the hardware. */
@@ -673,7 +673,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
                dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
                        __func__);
        } else {
-               struct timespec ts;
+               struct timespec64 ts;
                u32 regval;
 
                dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
@@ -695,7 +695,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
                i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
 
                /* Set the clock value. */
-               ts = ktime_to_timespec(ktime_get_real());
+               ts = ktime_to_timespec64(ktime_get_real());
                i40e_ptp_settime(&pf->ptp_caps, &ts);
        }
 }
index 65d3c8bb2d5b4f05eb4124ca61b041f0b7ae78aa..522d6df513300fffec88a2c2494a66c0d8e03db8 100644 (file)
 #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
 #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
 #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
 #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
 #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
 #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
 #define I40E_GLGEN_RTRIG_CORER_SHIFT 0
 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
 #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
 #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
 #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_VFINT_ITRN_MAX_INDEX 2
 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
 #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
 #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
 #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
 #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
 #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
 #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
 #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
 #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
 #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
 #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
 #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_CRCERRS_MAX_INDEX 3
 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
 #define I40E_GLPRT_TDOLD_MAX_INDEX 3
 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDPC_MAX_INDEX 3
-#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
-#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
 #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPRCH_MAX_INDEX 3
 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
 #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
 #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
 #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
 #define I40E_VFINT_ITRN1_MAX_INDEX 2
 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
 #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
index bbf1b1247ac471bb712ed1397956db83cf80e4f3..d8989f9d1798cfa7279c3216a08a4cdbfafd03d9 100644 (file)
@@ -25,6 +25,7 @@
  ******************************************************************************/
 
 #include <linux/prefetch.h>
+#include <net/busy_poll.h>
 #include "i40e.h"
 #include "i40e_prototype.h"
 
@@ -44,7 +45,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
  * i40e_program_fdir_filter - Program a Flow Director filter
  * @fdir_data: Packet data that will be filter parameters
  * @raw_packet: the pre-allocated packet buffer for FDir
- * @pf: The pf pointer
+ * @pf: The PF pointer
  * @add: True for add/update, False for remove
  **/
 int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
@@ -227,7 +228,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
                         "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
                         fd_data->pctype, fd_data->fd_id, ret);
                err = true;
-       } else {
+       } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
                if (add)
                        dev_info(&pf->pdev->dev,
                                 "Filter OK for PCTYPE %d loc = %d\n",
@@ -302,7 +303,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                         "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
                         fd_data->pctype, fd_data->fd_id, ret);
                err = true;
-       } else {
+       } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
                if (add)
                        dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
                                 fd_data->pctype, fd_data->fd_id);
@@ -375,7 +376,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
                                 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
                                 fd_data->pctype, fd_data->fd_id, ret);
                        err = true;
-               } else {
+               } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
                        if (add)
                                dev_info(&pf->pdev->dev,
                                         "Filter OK for PCTYPE %d loc = %d\n",
@@ -470,12 +471,27 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                        dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
                                 rx_desc->wb.qword0.hi_dword.fd_id);
 
+               /* Check if the programming error is for ATR.
+                * If so, auto disable ATR and set a state for
+                * flush in progress. Next time we come here if flush is in
+                * progress do nothing, once flush is complete the state will
+                * be cleared.
+                */
+               if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+                       return;
+
                pf->fd_add_err++;
                /* store the current atr filter count */
                pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
 
+               if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
+                   (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+                       pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+                       set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+               }
+
                /* filter programming failed most likely due to table full */
-               fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+               fcnt_prog = i40e_get_global_fd_count(pf);
                fcnt_avail = pf->fdir_pf_filter_count;
                /* If ATR is running fcnt_prog can quickly change,
                 * if we are very close to full, it makes sense to disable
@@ -754,6 +770,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                }
 
+               prefetch(tx_desc);
+
                /* update budget accounting */
                budget--;
        } while (likely(budget));
@@ -841,6 +859,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
        u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
                  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
                  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
                  /* allow 00 to be written to the index */
@@ -1031,6 +1050,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
        if (!rx_ring->rx_bi)
                return;
 
+       if (ring_is_ps_enabled(rx_ring)) {
+               int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
+
+               rx_bi = &rx_ring->rx_bi[0];
+               if (rx_bi->hdr_buf) {
+                       dma_free_coherent(dev,
+                                         bufsz,
+                                         rx_bi->hdr_buf,
+                                         rx_bi->dma);
+                       for (i = 0; i < rx_ring->count; i++) {
+                               rx_bi = &rx_ring->rx_bi[i];
+                               rx_bi->dma = 0;
+                               rx_bi->hdr_buf = NULL;
+                       }
+               }
+       }
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                rx_bi = &rx_ring->rx_bi[i];
@@ -1088,6 +1123,37 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
        }
 }
 
+/**
+ * i40e_alloc_rx_headers - allocate rx header buffers
+ * @rx_ring: ring to alloc buffers
+ *
+ * Allocate rx header buffers for the entire ring. As these are static,
+ * this is only called when setting up a new ring.
+ **/
+void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
+{
+       struct device *dev = rx_ring->dev;
+       struct i40e_rx_buffer *rx_bi;
+       dma_addr_t dma;
+       void *buffer;
+       int buf_size;
+       int i;
+
+       if (rx_ring->rx_bi[0].hdr_buf)
+               return;
+       /* Make sure the buffers don't cross cache line boundaries. */
+       buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
+       buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
+                                   &dma, GFP_KERNEL);
+       if (!buffer)
+               return;
+       for (i = 0; i < rx_ring->count; i++) {
+               rx_bi = &rx_ring->rx_bi[i];
+               rx_bi->dma = dma + (i * buf_size);
+               rx_bi->hdr_buf = buffer + (i * buf_size);
+       }
+}
+
 /**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
@@ -1148,11 +1214,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 }
 
 /**
- * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
+ * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
  **/
-void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+       u16 i = rx_ring->next_to_use;
+       union i40e_rx_desc *rx_desc;
+       struct i40e_rx_buffer *bi;
+
+       /* do nothing if no valid netdev defined */
+       if (!rx_ring->netdev || !cleaned_count)
+               return;
+
+       while (cleaned_count--) {
+               rx_desc = I40E_RX_DESC(rx_ring, i);
+               bi = &rx_ring->rx_bi[i];
+
+               if (bi->skb) /* desc is in use */
+                       goto no_buffers;
+               if (!bi->page) {
+                       bi->page = alloc_page(GFP_ATOMIC);
+                       if (!bi->page) {
+                               rx_ring->rx_stats.alloc_page_failed++;
+                               goto no_buffers;
+                       }
+               }
+
+               if (!bi->page_dma) {
+                       /* use a half page if we're re-using */
+                       bi->page_offset ^= PAGE_SIZE / 2;
+                       bi->page_dma = dma_map_page(rx_ring->dev,
+                                                   bi->page,
+                                                   bi->page_offset,
+                                                   PAGE_SIZE / 2,
+                                                   DMA_FROM_DEVICE);
+                       if (dma_mapping_error(rx_ring->dev,
+                                             bi->page_dma)) {
+                               rx_ring->rx_stats.alloc_page_failed++;
+                               bi->page_dma = 0;
+                               goto no_buffers;
+                       }
+               }
+
+               dma_sync_single_range_for_device(rx_ring->dev,
+                                                bi->dma,
+                                                0,
+                                                rx_ring->rx_hdr_len,
+                                                DMA_FROM_DEVICE);
+               /* Refresh the desc even if buffer_addrs didn't change
+                * because each write-back erases this info.
+                */
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+               rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+               i++;
+               if (i == rx_ring->count)
+                       i = 0;
+       }
+
+no_buffers:
+       if (rx_ring->next_to_use != i)
+               i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
        u16 i = rx_ring->next_to_use;
        union i40e_rx_desc *rx_desc;
@@ -1192,40 +1323,8 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
                        }
                }
 
-               if (ring_is_ps_enabled(rx_ring)) {
-                       if (!bi->page) {
-                               bi->page = alloc_page(GFP_ATOMIC);
-                               if (!bi->page) {
-                                       rx_ring->rx_stats.alloc_page_failed++;
-                                       goto no_buffers;
-                               }
-                       }
-
-                       if (!bi->page_dma) {
-                               /* use a half page if we're re-using */
-                               bi->page_offset ^= PAGE_SIZE / 2;
-                               bi->page_dma = dma_map_page(rx_ring->dev,
-                                                           bi->page,
-                                                           bi->page_offset,
-                                                           PAGE_SIZE / 2,
-                                                           DMA_FROM_DEVICE);
-                               if (dma_mapping_error(rx_ring->dev,
-                                                     bi->page_dma)) {
-                                       rx_ring->rx_stats.alloc_page_failed++;
-                                       bi->page_dma = 0;
-                                       goto no_buffers;
-                               }
-                       }
-
-                       /* Refresh the desc even if buffer_addrs didn't change
-                        * because each write-back erases this info.
-                        */
-                       rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
-                       rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-               } else {
-                       rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-                       rx_desc->read.hdr_addr = 0;
-               }
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+               rx_desc->read.hdr_addr = 0;
                i++;
                if (i == rx_ring->count)
                        i = 0;
@@ -1279,10 +1378,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        struct iphdr *iph;
        __sum16 csum;
 
-       ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
-                     (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
-       ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
-                     (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+       ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+                    (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+       ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+                    (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
 
        skb->ip_summed = CHECKSUM_NONE;
 
@@ -1410,13 +1509,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
 }
 
 /**
- * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
  * @rx_ring:  rx ring to clean
  * @budget:   how many cleans we're allowed
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
@@ -1432,25 +1531,54 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        if (budget <= 0)
                return 0;
 
-       rx_desc = I40E_RX_DESC(rx_ring, i);
-       qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-       rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-                   I40E_RXD_QW1_STATUS_SHIFT;
-
-       while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
-               union i40e_rx_desc *next_rxd;
+       do {
                struct i40e_rx_buffer *rx_bi;
                struct sk_buff *skb;
                u16 vlan_tag;
+               /* return some buffers to hardware, one at a time is too slow */
+               if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+                       i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+                       cleaned_count = 0;
+               }
+
+               i = rx_ring->next_to_clean;
+               rx_desc = I40E_RX_DESC(rx_ring, i);
+               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+               rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+                       I40E_RXD_QW1_STATUS_SHIFT;
+
+               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+                       break;
+
+               /* This memory barrier is needed to keep us from reading
+                * any other fields out of the rx_desc until we know the
+                * DD bit is set.
+                */
+               rmb();
                if (i40e_rx_is_programming_status(qword)) {
                        i40e_clean_programming_status(rx_ring, rx_desc);
-                       I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
-                       goto next_desc;
+                       I40E_RX_INCREMENT(rx_ring, i);
+                       continue;
                }
                rx_bi = &rx_ring->rx_bi[i];
                skb = rx_bi->skb;
-               prefetch(skb->data);
+               if (likely(!skb)) {
+                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                                       rx_ring->rx_hdr_len);
+                       if (!skb) {
+                               rx_ring->rx_stats.alloc_buff_failed++;
+                               break;
+                       }
 
+                       /* initialize queue mapping */
+                       skb_record_rx_queue(skb, rx_ring->queue_index);
+                       /* we are reusing so sync this buffer for CPU use */
+                       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                                     rx_bi->dma,
+                                                     0,
+                                                     rx_ring->rx_hdr_len,
+                                                     DMA_FROM_DEVICE);
+               }
                rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
                                I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
                rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
@@ -1465,40 +1593,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
+               prefetch(rx_bi->page);
                rx_bi->skb = NULL;
-
-               /* This memory barrier is needed to keep us from reading
-                * any other fields out of the rx_desc until we know the
-                * STATUS_DD bit is set
-                */
-               rmb();
-
-               /* Get the header and possibly the whole packet
-                * If this is an skb from previous receive dma will be 0
-                */
-               if (rx_bi->dma) {
-                       u16 len;
-
+               cleaned_count++;
+               if (rx_hbo || rx_sph) {
+                       int len;
                        if (rx_hbo)
                                len = I40E_RX_HDR_SIZE;
-                       else if (rx_sph)
-                               len = rx_header_len;
-                       else if (rx_packet_len)
-                               len = rx_packet_len;   /* 1buf/no split found */
                        else
-                               len = rx_header_len;   /* split always mode */
-
-                       skb_put(skb, len);
-                       dma_unmap_single(rx_ring->dev,
-                                        rx_bi->dma,
-                                        rx_ring->rx_buf_len,
-                                        DMA_FROM_DEVICE);
-                       rx_bi->dma = 0;
+                               len = rx_header_len;
+                       memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
+               } else if (skb->len == 0) {
+                       int len;
+
+                       len = (rx_packet_len > skb_headlen(skb) ?
+                               skb_headlen(skb) : rx_packet_len);
+                       memcpy(__skb_put(skb, len),
+                              rx_bi->page + rx_bi->page_offset,
+                              len);
+                       rx_bi->page_offset += len;
+                       rx_packet_len -= len;
                }
 
                /* Get the rest of the data if this was a header split */
-               if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
-
+               if (rx_packet_len) {
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_bi->page,
                                           rx_bi->page_offset,
@@ -1520,22 +1638,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                                       DMA_FROM_DEVICE);
                        rx_bi->page_dma = 0;
                }
-               I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+               I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
                    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        struct i40e_rx_buffer *next_buffer;
 
                        next_buffer = &rx_ring->rx_bi[i];
-
-                       if (ring_is_ps_enabled(rx_ring)) {
-                               rx_bi->skb = next_buffer->skb;
-                               rx_bi->dma = next_buffer->dma;
-                               next_buffer->skb = skb;
-                               next_buffer->dma = 0;
-                       }
+                       next_buffer->skb = skb;
                        rx_ring->rx_stats.non_eop_descs++;
-                       goto next_desc;
+                       continue;
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
@@ -1544,7 +1656,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                        /* TODO: shouldn't we increment a counter indicating the
                         * drop?
                         */
-                       goto next_desc;
+                       continue;
                }
 
                skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
@@ -1570,33 +1682,149 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 #ifdef I40E_FCOE
                if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
                        dev_kfree_skb_any(skb);
-                       goto next_desc;
+                       continue;
                }
 #endif
+               skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
                i40e_receive_skb(rx_ring, skb, vlan_tag);
 
                rx_ring->netdev->last_rx = jiffies;
-               budget--;
-next_desc:
                rx_desc->wb.qword1.status_error_len = 0;
-               if (!budget)
-                       break;
 
-               cleaned_count++;
+       } while (likely(total_rx_packets < budget));
+
+       u64_stats_update_begin(&rx_ring->syncp);
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       u64_stats_update_end(&rx_ring->syncp);
+       rx_ring->q_vector->rx.total_packets += total_rx_packets;
+       rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+       return total_rx_packets;
+}
+
+/**
+ * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
+ * @rx_ring:  rx ring to clean
+ * @budget:   how many cleans we're allowed
+ *
+ * Returns number of packets cleaned
+ **/
+static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+{
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+       u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+       struct i40e_vsi *vsi = rx_ring->vsi;
+       union i40e_rx_desc *rx_desc;
+       u32 rx_error, rx_status;
+       u16 rx_packet_len;
+       u8 rx_ptype;
+       u64 qword;
+       u16 i;
+
+       do {
+               struct i40e_rx_buffer *rx_bi;
+               struct sk_buff *skb;
+               u16 vlan_tag;
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-                       i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+                       i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }
 
-               /* use prefetched values */
-               rx_desc = next_rxd;
+               i = rx_ring->next_to_clean;
+               rx_desc = I40E_RX_DESC(rx_ring, i);
                qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-                           I40E_RXD_QW1_STATUS_SHIFT;
-       }
+                       I40E_RXD_QW1_STATUS_SHIFT;
+
+               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+                       break;
+
+               /* This memory barrier is needed to keep us from reading
+                * any other fields out of the rx_desc until we know the
+                * DD bit is set.
+                */
+               rmb();
+
+               if (i40e_rx_is_programming_status(qword)) {
+                       i40e_clean_programming_status(rx_ring, rx_desc);
+                       I40E_RX_INCREMENT(rx_ring, i);
+                       continue;
+               }
+               rx_bi = &rx_ring->rx_bi[i];
+               skb = rx_bi->skb;
+               prefetch(skb->data);
+
+               rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+                               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+
+               rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+                          I40E_RXD_QW1_ERROR_SHIFT;
+               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+                          I40E_RXD_QW1_PTYPE_SHIFT;
+               rx_bi->skb = NULL;
+               cleaned_count++;
+
+               /* Get the header and possibly the whole packet
+                * If this is an skb from previous receive dma will be 0
+                */
+               skb_put(skb, rx_packet_len);
+               dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
+                                DMA_FROM_DEVICE);
+               rx_bi->dma = 0;
+
+               I40E_RX_INCREMENT(rx_ring, i);
+
+               if (unlikely(
+                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                       rx_ring->rx_stats.non_eop_descs++;
+                       continue;
+               }
+
+               /* ERR_MASK will only have valid bits if EOP set */
+               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+                       dev_kfree_skb_any(skb);
+                       /* TODO: shouldn't we increment a counter indicating the
+                        * drop?
+                        */
+                       continue;
+               }
+
+               skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+                            i40e_ptype_to_hash(rx_ptype));
+               if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+                       i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+                                          I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+                                          I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
+                       rx_ring->last_rx_timestamp = jiffies;
+               }
+
+               /* probably a little skewed due to removing CRC */
+               total_rx_bytes += skb->len;
+               total_rx_packets++;
+
+               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+               i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+                        ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+                        : 0;
+#ifdef I40E_FCOE
+               if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
+#endif
+               i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+               rx_ring->netdev->last_rx = jiffies;
+               rx_desc->wb.qword1.status_error_len = 0;
+       } while (likely(total_rx_packets < budget));
 
-       rx_ring->next_to_clean = i;
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
@@ -1604,10 +1832,7 @@ next_desc:
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-       if (cleaned_count)
-               i40e_alloc_rx_buffers(rx_ring, cleaned_count);
-
-       return budget > 0;
+       return total_rx_packets;
 }
 
 /**
@@ -1628,6 +1853,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        bool clean_complete = true;
        bool arm_wb = false;
        int budget_per_ring;
+       int cleaned;
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
                napi_complete(napi);
@@ -1647,8 +1873,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
         */
        budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
-       i40e_for_each_ring(ring, q_vector->rx)
-               clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
+       i40e_for_each_ring(ring, q_vector->rx) {
+               if (ring_is_ps_enabled(ring))
+                       cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
+               else
+                       cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+               /* if we didn't clean as many as budgeted, we must be done */
+               clean_complete &= (budget_per_ring != cleaned);
+       }
 
        /* If work not completed, return budget and polling will return */
        if (!clean_complete) {
@@ -1715,6 +1947,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
                return;
 
+       if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+               return;
+
        /* if sampling is disabled do nothing */
        if (!tx_ring->atr_sample_rate)
                return;
@@ -1822,6 +2057,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
        __be16 protocol = skb->protocol;
        u32  tx_flags = 0;
 
+       if (protocol == htons(ETH_P_8021Q) &&
+           !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+               /* When HW VLAN acceleration is turned off by the user the
+                * stack sets the protocol to 8021q so that the driver
+                * can take any steps required to support the SW only
+                * VLAN handling.  In our case the driver doesn't need
+                * to take any further steps so just set the protocol
+                * to the encapsulated ethertype.
+                */
+               skb->protocol = vlan_get_protocol(skb);
+               goto out;
+       }
+
        /* if we have a HW VLAN tag being added, default to the HW one */
        if (skb_vlan_tag_present(skb)) {
                tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
@@ -1838,6 +2086,9 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                tx_flags |= I40E_TX_FLAGS_SW_VLAN;
        }
 
+       if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+               goto out;
+
        /* Insert 802.1p priority into VLAN header */
        if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
            (skb->priority != TC_PRIO_CONTROL)) {
@@ -1858,6 +2109,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                        tx_flags |= I40E_TX_FLAGS_HW_VLAN;
                }
        }
+
+out:
        *flags = tx_flags;
        return 0;
 }
@@ -1982,8 +2235,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
        struct iphdr *this_ip_hdr;
        u32 network_hdr_len;
        u8 l4_hdr = 0;
+       u32 l4_tunnel = 0;
 
        if (skb->encapsulation) {
+               switch (ip_hdr(skb)->protocol) {
+               case IPPROTO_UDP:
+                       l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                       break;
+               default:
+                       return;
+               }
                network_hdr_len = skb_inner_network_header_len(skb);
                this_ip_hdr = inner_ip_hdr(skb);
                this_ipv6_hdr = inner_ipv6_hdr(skb);
@@ -2006,8 +2267,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 
                /* Now set the ctx descriptor fields */
                *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
-                                       I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
-                                  I40E_TXD_CTX_UDP_TUNNELING            |
+                                  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT      |
+                                  l4_tunnel                             |
                                   ((skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
index dff0baeb1ecc092e53ef22ea373be46e79df13a2..4b0b8102cdc39c2529f49c18d6b1cbc61c48c341 100644 (file)
@@ -96,6 +96,14 @@ enum i40e_dyn_idx_t {
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define I40E_RX_BUFFER_WRITE   16      /* Must be power of 2 */
+#define I40E_RX_INCREMENT(r, i) \
+       do {                                    \
+               (i)++;                          \
+               if ((i) == (r)->count)          \
+                       i = 0;                  \
+               r->next_to_clean = i;           \
+       } while (0)
+
 #define I40E_RX_NEXT_DESC(r, i, n)             \
        do {                                    \
                (i)++;                          \
@@ -152,6 +160,7 @@ struct i40e_tx_buffer {
 
 struct i40e_rx_buffer {
        struct sk_buff *skb;
+       void *hdr_buf;
        dma_addr_t dma;
        struct page *page;
        dma_addr_t page_dma;
@@ -224,8 +233,8 @@ struct i40e_ring {
        u16 rx_buf_len;
        u8  dtype;
 #define I40E_RX_DTYPE_NO_SPLIT      0
-#define I40E_RX_DTYPE_SPLIT_ALWAYS  1
-#define I40E_RX_DTYPE_HEADER_SPLIT  2
+#define I40E_RX_DTYPE_HEADER_SPLIT  1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
        u8  hsplit;
 #define I40E_RX_SPLIT_L2      0x1
 #define I40E_RX_SPLIT_IP      0x2
@@ -281,7 +290,9 @@ struct i40e_ring_container {
 #define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+void i40e_alloc_rx_headers(struct i40e_ring *rxr);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
index e9901ef06a6361f9e2a1f56004764eb51ed60513..67c7bc9e9c21d2ecc6a7e79bf500346f86c0e34d 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -44,6 +44,7 @@
 #define I40E_DEV_ID_QSFP_B             0x1584
 #define I40E_DEV_ID_QSFP_C             0x1585
 #define I40E_DEV_ID_10G_BASE_T         0x1586
+#define I40E_DEV_ID_20G_KR2            0x1587
 #define I40E_DEV_ID_VF                 0x154C
 #define I40E_DEV_ID_VF_HV              0x1571
 
@@ -175,12 +176,12 @@ struct i40e_link_status {
        u8 an_info;
        u8 ext_info;
        u8 loopback;
-       bool an_enabled;
        /* is Link Status Event notification to SW enabled */
        bool lse_enable;
        u16 max_frame_size;
        bool crc_enable;
        u8 pacing;
+       u8 requested_speeds;
 };
 
 struct i40e_phy_info {
@@ -1143,7 +1144,7 @@ struct i40e_hw_port_stats {
 #define I40E_SR_EMP_MODULE_PTR                 0x0F
 #define I40E_SR_PBA_FLAGS                      0x15
 #define I40E_SR_PBA_BLOCK_PTR                  0x16
-#define I40E_SR_NVM_IMAGE_VERSION              0x18
+#define I40E_SR_NVM_DEV_STARTER_VERSION                0x18
 #define I40E_SR_NVM_WAKE_ON_LAN                        0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR  0x27
 #define I40E_SR_NVM_EETRACK_LO                 0x2D
@@ -1401,6 +1402,19 @@ struct i40e_lldp_variables {
        u16 crc8;
 };
 
+/* Offsets into Alternate Ram */
+#define I40E_ALT_STRUCT_FIRST_PF_OFFSET                0   /* in dwords */
+#define I40E_ALT_STRUCT_DWORDS_PER_PF          64   /* in dwords */
+#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET  0xD  /* in dwords */
+#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET   0xC  /* in dwords */
+#define I40E_ALT_STRUCT_MIN_BW_OFFSET          0xE  /* in dwords */
+#define I40E_ALT_STRUCT_MAX_BW_OFFSET          0xF  /* in dwords */
+
+/* Alternate Ram Bandwidth Masks */
+#define I40E_ALT_BW_VALUE_MASK         0xFF
+#define I40E_ALT_BW_RELATIVE_MASK      0x40000000
+#define I40E_ALT_BW_VALID_MASK         0x80000000
+
 /* RSS Hash Table Size */
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_512        0x00010000
 #endif /* _I40E_TYPE_H_ */
index 61dd1b18762476fd34312c2eafced083c2c86bfb..2d20af290fbf20bc9fc0dbdc88486fea61ec978f 100644 (file)
  * of the virtchnl_msg structure.
  */
 enum i40e_virtchnl_ops {
-/* VF sends req. to pf for the following
- * ops.
+/* The PF sends status change events to VFs using
+ * the I40E_VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
  */
        I40E_VIRTCHNL_OP_UNKNOWN = 0,
        I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
-       I40E_VIRTCHNL_OP_RESET_VF,
-       I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
-       I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
-       I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
-       I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
-       I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
-       I40E_VIRTCHNL_OP_ENABLE_QUEUES,
-       I40E_VIRTCHNL_OP_DISABLE_QUEUES,
-       I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
-       I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
-       I40E_VIRTCHNL_OP_ADD_VLAN,
-       I40E_VIRTCHNL_OP_DEL_VLAN,
-       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
-       I40E_VIRTCHNL_OP_GET_STATS,
-       I40E_VIRTCHNL_OP_FCOE,
-       I40E_VIRTCHNL_OP_CONFIG_RSS,
-/* PF sends status change events to vfs using
- * the following op.
- */
-       I40E_VIRTCHNL_OP_EVENT,
+       I40E_VIRTCHNL_OP_RESET_VF = 2,
+       I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+       I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+       I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+       I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+       I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+       I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
+       I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
+       I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
+       I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
+       I40E_VIRTCHNL_OP_ADD_VLAN = 12,
+       I40E_VIRTCHNL_OP_DEL_VLAN = 13,
+       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+       I40E_VIRTCHNL_OP_GET_STATS = 15,
+       I40E_VIRTCHNL_OP_FCOE = 16,
+       I40E_VIRTCHNL_OP_EVENT = 17,
+       I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
index 40f042af413172e57d805228e9cec92dfa935e45..4d69e1f04901553d4efe7a9e5357c1c8229235c8 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -30,8 +30,8 @@
 
 /**
  * i40e_vc_disable_vf
- * @pf: pointer to the pf info
- * @vf: pointer to the vf info
+ * @pf: pointer to the PF info
+ * @vf: pointer to the VF info
  *
  * Disable the VF through a SW reset
  **/
@@ -48,38 +48,40 @@ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
 
 /**
  * i40e_vc_isvalid_vsi_id
- * @vf: pointer to the vf info
- * @vsi_id: vf relative vsi id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI id
  *
- * check for the valid vsi id
+ * check for the valid VSI id
  **/
-static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
 {
        struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 
-       return pf->vsi[vsi_id]->vf_id == vf->vf_id;
+       return (vsi && (vsi->vf_id == vf->vf_id));
 }
 
 /**
  * i40e_vc_isvalid_queue_id
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @vsi_id: vsi id
  * @qid: vsi relative queue id
  *
  * check for the valid queue id
  **/
-static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
                                            u8 qid)
 {
        struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 
-       return qid < pf->vsi[vsi_id]->alloc_queue_pairs;
+       return (vsi && (qid < vsi->alloc_queue_pairs));
 }
 
 /**
  * i40e_vc_isvalid_vector_id
- * @vf: pointer to the vf info
- * @vector_id: vf relative vector id
+ * @vf: pointer to the VF info
+ * @vector_id: VF relative vector id
  *
  * check for the valid vector id
  **/
@@ -94,19 +96,22 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
 
 /**
  * i40e_vc_get_pf_queue_id
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
  * @vsi_queue_id: vsi relative queue id
  *
- * return pf relative queue id
+ * return PF relative queue id
  **/
-static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
                                   u8 vsi_queue_id)
 {
        struct i40e_pf *pf = vf->pf;
-       struct i40e_vsi *vsi = pf->vsi[vsi_idx];
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
        u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
 
+       if (!vsi)
+               return pf_queue_id;
+
        if (le16_to_cpu(vsi->info.mapping_flags) &
            I40E_AQ_VSI_QUE_MAP_NONCONTIG)
                pf_queue_id =
@@ -120,13 +125,13 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
 
 /**
  * i40e_config_irq_link_list
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as given by the FW
  * @vecmap: irq map info
  *
  * configure irq link list from the map
  **/
-static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
+static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
                                      struct i40e_virtchnl_vector_map *vecmap)
 {
        unsigned long linklistmap = 0, tempmap;
@@ -171,7 +176,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
                                 I40E_VIRTCHNL_SUPPORTED_QTYPES));
        vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
        qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
-       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
        reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 
        wr32(hw, reg_idx, reg);
@@ -198,7 +203,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
                    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
                        vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
                        qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
-                       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
+                       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
                                                              vsi_queue_id);
                } else {
                        pf_queue_id = I40E_QUEUE_END_OF_LIST;
@@ -220,25 +225,27 @@ irq_list_done:
 
 /**
  * i40e_config_vsi_tx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
  * @vsi_queue_id: vsi relative queue index
  * @info: config. info
  *
  * configure tx queue
  **/
-static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
                                    u16 vsi_queue_id,
                                    struct i40e_virtchnl_txq_info *info)
 {
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_hmc_obj_txq tx_ctx;
+       struct i40e_vsi *vsi;
        u16 pf_queue_id;
        u32 qtx_ctl;
        int ret = 0;
 
-       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+       vsi = i40e_find_vsi_from_id(pf, vsi_id);
 
        /* clear the context structure first */
        memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
@@ -246,7 +253,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
        /* only set the required fields */
        tx_ctx.base = info->dma_ring_addr / 128;
        tx_ctx.qlen = info->ring_len;
-       tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
+       tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
        tx_ctx.rdylist_act = 0;
        tx_ctx.head_wb_ena = info->headwb_enabled;
        tx_ctx.head_wb_addr = info->dma_headwb_addr;
@@ -287,14 +294,14 @@ error_context:
 
 /**
  * i40e_config_vsi_rx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI  as provided by the FW
  * @vsi_queue_id: vsi relative queue index
  * @info: config. info
  *
  * configure rx queue
  **/
-static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
                                    u16 vsi_queue_id,
                                    struct i40e_virtchnl_rxq_info *info)
 {
@@ -304,7 +311,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
        u16 pf_queue_id;
        int ret = 0;
 
-       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 
        /* clear the context structure first */
        memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
@@ -378,10 +385,10 @@ error_param:
 
 /**
  * i40e_alloc_vsi_res
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @type: type of VSI to allocate
  *
- * alloc vf vsi context & resources
+ * alloc VF vsi context & resources
  **/
 static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 {
@@ -394,18 +401,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 
        if (!vsi) {
                dev_err(&pf->pdev->dev,
-                       "add vsi failed for vf %d, aq_err %d\n",
+                       "add vsi failed for VF %d, aq_err %d\n",
                        vf->vf_id, pf->hw.aq.asq_last_status);
                ret = -ENOENT;
                goto error_alloc_vsi_res;
        }
        if (type == I40E_VSI_SRIOV) {
                u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-               vf->lan_vsi_index = vsi->idx;
+               vf->lan_vsi_idx = vsi->idx;
                vf->lan_vsi_id = vsi->id;
-               dev_info(&pf->pdev->dev,
-                        "VF %d assigned LAN VSI index %d, VSI id %d\n",
-                        vf->vf_id, vsi->idx, vsi->id);
                /* If the port VLAN has been configured and then the
                 * VF driver was removed then the VSI port VLAN
                 * configuration was destroyed.  Check if there is
@@ -446,9 +450,9 @@ error_alloc_vsi_res:
 
 /**
  * i40e_enable_vf_mappings
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  *
- * enable vf mappings
+ * enable VF mappings
  **/
 static void i40e_enable_vf_mappings(struct i40e_vf *vf)
 {
@@ -469,8 +473,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
 
        /* map PF queues to VF queues */
-       for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) {
-               u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+       for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
+               u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
                reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
                wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
                total_queue_pairs++;
@@ -478,13 +482,13 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
 
        /* map PF queues to VSI */
        for (j = 0; j < 7; j++) {
-               if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) {
+               if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
                        reg = 0x07FF07FF;       /* unused */
                } else {
-                       u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+                       u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
                                                          j * 2);
                        reg = qid;
-                       qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+                       qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
                                                      (j * 2) + 1);
                        reg |= qid << 16;
                }
@@ -496,9 +500,9 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
 
 /**
  * i40e_disable_vf_mappings
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  *
- * disable vf mappings
+ * disable VF mappings
  **/
 static void i40e_disable_vf_mappings(struct i40e_vf *vf)
 {
@@ -516,9 +520,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf)
 
 /**
  * i40e_free_vf_res
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  *
- * free vf resources
+ * free VF resources
  **/
 static void i40e_free_vf_res(struct i40e_vf *vf)
 {
@@ -528,9 +532,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
        int i, msix_vf;
 
        /* free vsi & disconnect it from the parent uplink */
-       if (vf->lan_vsi_index) {
-               i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
-               vf->lan_vsi_index = 0;
+       if (vf->lan_vsi_idx) {
+               i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+               vf->lan_vsi_idx = 0;
                vf->lan_vsi_id = 0;
        }
        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -571,9 +575,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
 
 /**
  * i40e_alloc_vf_res
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  *
- * allocate vf resources
+ * allocate VF resources
  **/
 static int i40e_alloc_vf_res(struct i40e_vf *vf)
 {
@@ -585,15 +589,15 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
        ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
        if (ret)
                goto error_alloc;
-       total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
+       total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
        set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 
        /* store the total qps number for the runtime
-        * vf req validation
+        * VF req validation
         */
        vf->num_queue_pairs = total_queue_pairs;
 
-       /* vf is now completely initialized */
+       /* VF is now completely initialized */
        set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
 
 error_alloc:
@@ -607,7 +611,7 @@ error_alloc:
 #define VF_TRANS_PENDING_MASK 0x20
 /**
  * i40e_quiesce_vf_pci
- * @vf: pointer to the vf structure
+ * @vf: pointer to the VF structure
  *
  * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
  * if the transactions never clear.
@@ -634,10 +638,10 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
 
 /**
  * i40e_reset_vf
- * @vf: pointer to the vf structure
+ * @vf: pointer to the VF structure
  * @flr: VFLR was issued or not
  *
- * reset the vf
+ * reset the VF
  **/
 void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 {
@@ -657,7 +661,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
         * just need to clean up, so don't hit the VFRTRIG register.
         */
        if (!flr) {
-               /* reset vf using VPGEN_VFRTRIG reg */
+               /* reset VF using VPGEN_VFRTRIG reg */
                reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
                reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
                wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
@@ -695,12 +699,12 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 
        /* On initial reset, we won't have any queues */
-       if (vf->lan_vsi_index == 0)
+       if (vf->lan_vsi_idx == 0)
                goto complete_reset;
 
-       i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
+       i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
 complete_reset:
-       /* reallocate vf resources to reset the VSI state */
+       /* reallocate VF resources to reset the VSI state */
        i40e_free_vf_res(vf);
        i40e_alloc_vf_res(vf);
        i40e_enable_vf_mappings(vf);
@@ -712,79 +716,11 @@ complete_reset:
        clear_bit(__I40E_VF_DISABLE, &pf->state);
 }
 
-/**
- * i40e_enable_pf_switch_lb
- * @pf: pointer to the pf structure
- *
- * enable switch loop back or die - no point in a return value
- **/
-void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
-{
-       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
-       struct i40e_vsi_context ctxt;
-       int aq_ret;
-
-       ctxt.seid = pf->main_vsi_seid;
-       ctxt.pf_num = pf->hw.pf_id;
-       ctxt.vf_num = 0;
-       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-       if (aq_ret) {
-               dev_info(&pf->pdev->dev,
-                        "%s couldn't get pf vsi config, err %d, aq_err %d\n",
-                        __func__, aq_ret, pf->hw.aq.asq_last_status);
-               return;
-       }
-       ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-       ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-       ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
-
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
-               dev_info(&pf->pdev->dev,
-                        "%s: update vsi switch failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
-       }
-}
-
-/**
- * i40e_disable_pf_switch_lb
- * @pf: pointer to the pf structure
- *
- * disable switch loop back or die - no point in a return value
- **/
-static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
-{
-       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
-       struct i40e_vsi_context ctxt;
-       int aq_ret;
-
-       ctxt.seid = pf->main_vsi_seid;
-       ctxt.pf_num = pf->hw.pf_id;
-       ctxt.vf_num = 0;
-       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-       if (aq_ret) {
-               dev_info(&pf->pdev->dev,
-                        "%s couldn't get pf vsi config, err %d, aq_err %d\n",
-                        __func__, aq_ret, pf->hw.aq.asq_last_status);
-               return;
-       }
-       ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-       ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-       ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
-
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
-               dev_info(&pf->pdev->dev,
-                        "%s: update vsi switch failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
-       }
-}
-
 /**
  * i40e_free_vfs
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
  *
- * free vf resources
+ * free VF resources
  **/
 void i40e_free_vfs(struct i40e_pf *pf)
 {
@@ -803,10 +739,12 @@ void i40e_free_vfs(struct i40e_pf *pf)
         */
        if (!pci_vfs_assigned(pf->pdev))
                pci_disable_sriov(pf->pdev);
+       else
+               dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
 
        msleep(20); /* let any messages in transit get finished up */
 
-       /* free up vf resources */
+       /* free up VF resources */
        tmp = pf->num_alloc_vfs;
        pf->num_alloc_vfs = 0;
        for (i = 0; i < tmp; i++) {
@@ -832,10 +770,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
                }
-               i40e_disable_pf_switch_lb(pf);
-       } else {
-               dev_warn(&pf->pdev->dev,
-                        "unable to disable SR-IOV because VFs are assigned.\n");
        }
        clear_bit(__I40E_VF_DISABLE, &pf->state);
 }
@@ -843,10 +777,10 @@ void i40e_free_vfs(struct i40e_pf *pf)
 #ifdef CONFIG_PCI_IOV
 /**
  * i40e_alloc_vfs
- * @pf: pointer to the pf structure
- * @num_alloc_vfs: number of vfs to allocate
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of VFs to allocate
  *
- * allocate vf resources
+ * allocate VF resources
  **/
 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 {
@@ -883,15 +817,14 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
                /* assign default capabilities */
                set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
                vfs[i].spoofchk = true;
-               /* vf resources get allocated during reset */
+               /* VF resources get allocated during reset */
                i40e_reset_vf(&vfs[i], false);
 
-               /* enable vf vplan_qtable mappings */
+               /* enable VF vplan_qtable mappings */
                i40e_enable_vf_mappings(&vfs[i]);
        }
        pf->num_alloc_vfs = num_alloc_vfs;
 
-       i40e_enable_pf_switch_lb(pf);
 err_alloc:
        if (ret)
                i40e_free_vfs(pf);
@@ -905,7 +838,7 @@ err_iov:
 /**
  * i40e_pci_sriov_enable
  * @pdev: pointer to a pci_dev structure
- * @num_vfs: number of vfs to allocate
+ * @num_vfs: number of VFs to allocate
  *
  * Enable or change the number of VFs
  **/
@@ -945,7 +878,7 @@ err_out:
 /**
  * i40e_pci_sriov_configure
  * @pdev: pointer to a pci_dev structure
- * @num_vfs: number of vfs to allocate
+ * @num_vfs: number of VFs to allocate
  *
  * Enable or change the number of VFs. Called when the user updates the number
  * of VFs in sysfs.
@@ -970,13 +903,13 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 
 /**
  * i40e_vc_send_msg_to_vf
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @v_opcode: virtual channel opcode
  * @v_retval: virtual channel return value
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
- * send msg to vf
+ * send msg to VF
  **/
 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
                                  u32 v_retval, u8 *msg, u16 msglen)
@@ -1025,11 +958,11 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
 
 /**
  * i40e_vc_send_resp_to_vf
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @opcode: operation code
  * @retval: return value
  *
- * send resp msg to vf
+ * send resp msg to VF
  **/
 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
                                   enum i40e_virtchnl_ops opcode,
@@ -1040,9 +973,9 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
 
 /**
  * i40e_vc_get_version_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  *
- * called from the vf to request the API version used by the PF
+ * called from the VF to request the API version used by the PF
  **/
 static int i40e_vc_get_version_msg(struct i40e_vf *vf)
 {
@@ -1058,11 +991,11 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf)
 
 /**
  * i40e_vc_get_vf_resources_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
- * called from the vf to request its resources
+ * called from the VF to request its resources
  **/
 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
 {
@@ -1090,18 +1023,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
        }
 
        vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi->info.pvid)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
 
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
-       if (vf->lan_vsi_index) {
-               vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
+       if (vf->lan_vsi_idx) {
+               vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
                vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
                vfres->vsi_res[i].num_queue_pairs =
-                   pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
+                   pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
                memcpy(vfres->vsi_res[i].default_mac_addr,
                       vf->default_lan_addr.addr, ETH_ALEN);
                i++;
@@ -1109,7 +1042,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 
 err:
-       /* send the response back to the vf */
+       /* send the response back to the VF */
        ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
                                     aq_ret, (u8 *)vfres, len);
 
@@ -1119,13 +1052,13 @@ err:
 
 /**
  * i40e_vc_reset_vf_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
- * called from the vf to reset itself,
- * unlike other virtchnl messages, pf driver
- * doesn't send the response back to the vf
+ * called from the VF to reset itself,
+ * unlike other virtchnl messages, PF driver
+ * doesn't send the response back to the VF
  **/
 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
 {
@@ -1135,12 +1068,12 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
 
 /**
  * i40e_vc_config_promiscuous_mode_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
- * called from the vf to configure the promiscuous mode of
- * vf vsis
+ * called from the VF to configure the promiscuous mode of
+ * VF vsis
  **/
 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
                                               u8 *msg, u16 msglen)
@@ -1153,21 +1086,21 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
        bool allmulti = false;
        i40e_status aq_ret;
 
+       vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
-           (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
+           (vsi->type != I40E_VSI_FCOE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
-       vsi = pf->vsi[info->vsi_id];
        if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
                allmulti = true;
        aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
                                                       allmulti, NULL);
 
 error_param:
-       /* send the response to the vf */
+       /* send the response to the VF */
        return i40e_vc_send_resp_to_vf(vf,
                                       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
                                       aq_ret);
@@ -1175,11 +1108,11 @@ error_param:
 
 /**
  * i40e_vc_config_queues_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
- * called from the vf to configure the rx/tx
+ * called from the VF to configure the rx/tx
  * queues
  **/
 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
@@ -1221,22 +1154,22 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                        goto error_param;
                }
        }
-       /* set vsi num_queue_pairs in use to num configured by vf */
-       pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs;
+       /* set vsi num_queue_pairs in use to num configured by VF */
+       pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
 
 error_param:
-       /* send the response to the vf */
+       /* send the response to the VF */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                                       aq_ret);
 }
 
 /**
  * i40e_vc_config_irq_map_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
- * called from the vf to configure the irq to
+ * called from the VF to configure the irq to
  * queue map
  **/
 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
@@ -1288,18 +1221,18 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                i40e_config_irq_link_list(vf, vsi_id, map);
        }
 error_param:
-       /* send the response to the vf */
+       /* send the response to the VF */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                                       aq_ret);
 }
 
 /**
  * i40e_vc_enable_queues_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
- * called from the vf to enable all or specific queue(s)
+ * called from the VF to enable all or specific queue(s)
  **/
 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
@@ -1323,21 +1256,22 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
-       if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
+
+       if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
                aq_ret = I40E_ERR_TIMEOUT;
 error_param:
-       /* send the response to the vf */
+       /* send the response to the VF */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
                                       aq_ret);
 }
 
 /**
  * i40e_vc_disable_queues_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
- * called from the vf to disable all or specific
+ * called from the VF to disable all or specific
  * queue(s)
  **/
 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
@@ -1345,7 +1279,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        struct i40e_virtchnl_queue_select *vqs =
            (struct i40e_virtchnl_queue_select *)msg;
        struct i40e_pf *pf = vf->pf;
-       u16 vsi_id = vqs->vsi_id;
        i40e_status aq_ret = 0;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
@@ -1362,22 +1295,23 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
-       if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
+
+       if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
                aq_ret = I40E_ERR_TIMEOUT;
 
 error_param:
-       /* send the response to the vf */
+       /* send the response to the VF */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
                                       aq_ret);
 }
 
 /**
  * i40e_vc_get_stats_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
- * called from the vf to get vsi stats
+ * called from the VF to get vsi stats
  **/
 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
@@ -1400,7 +1334,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                goto error_param;
        }
 
-       vsi = pf->vsi[vqs->vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1409,14 +1343,14 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        stats = vsi->eth_stats;
 
 error_param:
-       /* send the response back to the vf */
+       /* send the response back to the VF */
        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
                                      (u8 *)&stats, sizeof(stats));
 }
 
 /**
  * i40e_check_vf_permission
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @macaddr: pointer to the MAC Address being checked
  *
  * Check if the VF has permission to add or delete unicast MAC address
@@ -1450,7 +1384,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
 
 /**
  * i40e_vc_add_mac_addr_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
@@ -1478,7 +1412,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                if (ret)
                        goto error_param;
        }
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
 
        /* add new addresses to the list */
        for (i = 0; i < al->num_elements; i++) {
@@ -1507,14 +1441,14 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
 
 error_param:
-       /* send the response to the vf */
+       /* send the response to the VF */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
                                       ret);
 }
 
 /**
  * i40e_vc_del_mac_addr_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
@@ -1546,7 +1480,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                        goto error_param;
                }
        }
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
 
        /* delete addresses from the list */
        for (i = 0; i < al->num_elements; i++)
@@ -1558,14 +1492,14 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
 
 error_param:
-       /* send the response to the vf */
+       /* send the response to the VF */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
                                       ret);
 }
 
 /**
  * i40e_vc_add_vlan_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
@@ -1596,7 +1530,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                        goto error_param;
                }
        }
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (vsi->info.pvid) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1613,13 +1547,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        }
 
 error_param:
-       /* send the response to the vf */
+       /* send the response to the VF */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
 }
 
 /**
  * i40e_vc_remove_vlan_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
@@ -1649,7 +1583,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                }
        }
 
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (vsi->info.pvid) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1664,13 +1598,13 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        }
 
 error_param:
-       /* send the response to the vf */
+       /* send the response to the VF */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
 }
 
 /**
  * i40e_vc_validate_vf_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  * @msghndl: msg handle
@@ -1776,14 +1710,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
 
 /**
  * i40e_vc_process_vf_msg
- * @pf: pointer to the pf structure
- * @vf_id: source vf id
+ * @pf: pointer to the PF structure
+ * @vf_id: source VF id
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  * @msghndl: msg handle
  *
  * called from the common aeq/arq handler to
- * process request from vf
+ * process request from VF
  **/
 int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
                           u32 v_retval, u8 *msg, u16 msglen)
@@ -1801,7 +1735,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
        ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
 
        if (ret) {
-               dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
+               dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
                        local_vf_id, v_opcode, msglen);
                return ret;
        }
@@ -1849,7 +1783,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
                break;
        case I40E_VIRTCHNL_OP_UNKNOWN:
        default:
-               dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
+               dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
                        v_opcode, local_vf_id);
                ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
                                              I40E_ERR_NOT_IMPLEMENTED);
@@ -1861,10 +1795,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
 
 /**
  * i40e_vc_process_vflr_event
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
  *
  * called from the vlfr irq handler to
- * free up vf resources and state variables
+ * free up VF resources and state variables
  **/
 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 {
@@ -1885,7 +1819,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
        for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
                reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
                bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
-               /* read GLGEN_VFLRSTAT register to find out the flr vfs */
+               /* read GLGEN_VFLRSTAT register to find out the flr VFs */
                vf = &pf->vf[vf_id];
                reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
                if (reg & (1 << bit_idx)) {
@@ -1902,7 +1836,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 
 /**
  * i40e_vc_vf_broadcast
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
  * @opcode: operation code
  * @retval: return value
  * @msg: pointer to the msg buffer
@@ -1921,7 +1855,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
 
        for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
                int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-               /* Not all vfs are enabled so skip the ones that are not */
+               /* Not all VFs are enabled so skip the ones that are not */
                if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
                    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
                        continue;
@@ -1936,7 +1870,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
 
 /**
  * i40e_vc_notify_link_state
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
  *
  * send a link status message to all VFs on a given PF
  **/
@@ -1969,7 +1903,7 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
 
 /**
  * i40e_vc_notify_reset
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
  *
  * indicate a pending reset to all VFs on a given PF
  **/
@@ -1985,7 +1919,7 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
 
 /**
  * i40e_vc_notify_vf_reset
- * @vf: pointer to the vf structure
+ * @vf: pointer to the VF structure
  *
  * indicate a pending reset to the given VF
  **/
@@ -2015,10 +1949,10 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 /**
  * i40e_ndo_set_vf_mac
  * @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
  * @mac: mac address
  *
- * program vf mac address
+ * program VF mac address
  **/
 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 {
@@ -2038,7 +1972,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        }
 
        vf = &(pf->vf[vf_id]);
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev,
                        "Uninitialized VF %d\n", vf_id);
@@ -2083,11 +2017,11 @@ error_param:
 /**
  * i40e_ndo_set_vf_port_vlan
  * @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
  * @vlan_id: mac address
  * @qos: priority setting
  *
- * program vf vlan id and/or qos
+ * program VF vlan id and/or qos
  **/
 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
                              int vf_id, u16 vlan_id, u8 qos)
@@ -2112,7 +2046,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
        }
 
        vf = &(pf->vf[vf_id]);
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
                ret = -EINVAL;
@@ -2196,10 +2130,10 @@ error_pvid:
 /**
  * i40e_ndo_set_vf_bw
  * @netdev: network interface device structure
- * @vf_id: vf identifier
- * @tx_rate: tx rate
+ * @vf_id: VF identifier
+ * @tx_rate: Tx rate
  *
- * configure vf tx rate
+ * configure VF Tx rate
  **/
 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
                       int max_tx_rate)
@@ -2219,13 +2153,13 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
        }
 
        if (min_tx_rate) {
-               dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
+               dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
                        min_tx_rate, vf_id);
                return -EINVAL;
        }
 
        vf = &(pf->vf[vf_id]);
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
                ret = -EINVAL;
@@ -2247,7 +2181,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
        }
 
        if (max_tx_rate > speed) {
-               dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.",
+               dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.",
                        max_tx_rate, vf->vf_id);
                ret = -EINVAL;
                goto error;
@@ -2276,10 +2210,10 @@ error:
 /**
  * i40e_ndo_get_vf_config
  * @netdev: network interface device structure
- * @vf_id: vf identifier
- * @ivi: vf configuration structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
  *
- * return vf configuration
+ * return VF configuration
  **/
 int i40e_ndo_get_vf_config(struct net_device *netdev,
                           int vf_id, struct ifla_vf_info *ivi)
@@ -2299,7 +2233,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 
        vf = &(pf->vf[vf_id]);
        /* first vsi is always the LAN vsi */
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
                ret = -EINVAL;
@@ -2331,7 +2265,7 @@ error_param:
 /**
  * i40e_ndo_set_vf_link_state
  * @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
  * @link: required link state
  *
  * Set the link state of a specified VF, regardless of physical link state
@@ -2394,7 +2328,7 @@ error_out:
 /**
  * i40e_ndo_set_vf_spoofchk
  * @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
  * @enable: flag to enable or disable feature
  *
  * Enable or disable VF spoof checking
@@ -2423,11 +2357,12 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
 
        vf->spoofchk = enable;
        memset(&ctxt, 0, sizeof(ctxt));
-       ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
+       ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
        ctxt.pf_num = pf->hw.pf_id;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
        if (enable)
-               ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+               ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+                                       I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret) {
                dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
index 9452f5247cffb7f03803aee6230b93be6873ccf9..09043c1aae5435109fcd2bab8b4059c52c105961 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -71,12 +71,12 @@ enum i40e_vf_capabilities {
 struct i40e_vf {
        struct i40e_pf *pf;
 
-       /* vf id in the pf space */
+       /* VF id in the PF space */
        u16 vf_id;
-       /* all vf vsis connect to the same parent */
+       /* all VF vsis connect to the same parent */
        enum i40e_switch_element_types parent_type;
 
-       /* vf Port Extender (PE) stag if used */
+       /* VF Port Extender (PE) stag if used */
        u16 stag;
 
        struct i40e_virtchnl_ether_addr default_lan_addr;
@@ -88,10 +88,10 @@ struct i40e_vf {
         * When assigned, these will be non-zero, because VSI 0 is always
         * the main LAN VSI for the PF.
         */
-       u8 lan_vsi_index;       /* index into PF struct */
+       u8 lan_vsi_idx;         /* index into PF struct */
        u8 lan_vsi_id;          /* ID as used by firmware */
 
-       u8 num_queue_pairs;     /* num of qps assigned to vf vsis */
+       u8 num_queue_pairs;     /* num of qps assigned to VF vsis */
        u64 num_mdd_events;     /* num of mdd events detected */
        u64 num_invalid_msgs;   /* num of malformed or invalid msgs detected */
        u64 num_valid_msgs;     /* num of valid msgs detected */
@@ -100,7 +100,7 @@ struct i40e_vf {
        unsigned long vf_states;        /* vf's runtime states */
        unsigned int tx_rate;   /* Tx bandwidth limit in Mbps */
        bool link_forced;
-       bool link_up;           /* only valid if vf link is forced */
+       bool link_up;           /* only valid if VF link is forced */
        bool spoofchk;
 };
 
@@ -113,7 +113,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf);
 void i40e_reset_vf(struct i40e_vf *vf, bool flr);
 void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
 
-/* vf configuration related iplink handlers */
+/* VF configuration related iplink handlers */
 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
                              int vf_id, u16 vlan_id, u8 qos);
@@ -126,6 +126,5 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
 
 void i40e_vc_notify_link_state(struct i40e_pf *pf);
 void i40e_vc_notify_reset(struct i40e_pf *pf);
-void i40e_enable_pf_switch_lb(struct i40e_pf *pf);
 
 #endif /* _I40E_VIRTCHNL_PF_H_ */
index 60f04e96a80e0a440faacf2c620777992259426b..ef43d68f67b30632b504962f1fd627799f16dd7f 100644 (file)
@@ -93,6 +93,7 @@ struct i40e_adminq_info {
        u16 asq_buf_size;               /* send queue buffer size */
        u16 fw_maj_ver;                 /* firmware major version */
        u16 fw_min_ver;                 /* firmware minor version */
+       u32 fw_build;                   /* firmware build number */
        u16 api_maj_ver;                /* api major version */
        u16 api_min_ver;                /* api minor version */
        bool nvm_release_on_done;
index 28c40c57d4f520afc3179e36dbb5d2a99558aa68..39fcb1dc4ea64d80b3601a3d62ff1052b22907c6 100644 (file)
@@ -51,6 +51,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                case I40E_DEV_ID_QSFP_B:
                case I40E_DEV_ID_QSFP_C:
                case I40E_DEV_ID_10G_BASE_T:
+               case I40E_DEV_ID_20G_KR2:
                        hw->mac.type = I40E_MAC_XL710;
                        break;
                case I40E_DEV_ID_VF:
@@ -85,46 +86,53 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 {
        struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
        u16 len = le16_to_cpu(aq_desc->datalen);
-       u8 *aq_buffer = (u8 *)buffer;
-       u32 data[4];
-       u32 i = 0;
+       u8 *buf = (u8 *)buffer;
+       u16 i = 0;
 
        if ((!(mask & hw->debug_mask)) || (desc == NULL))
                return;
 
        i40e_debug(hw, mask,
                   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
-                  aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
-                  aq_desc->retval);
+                  le16_to_cpu(aq_desc->opcode),
+                  le16_to_cpu(aq_desc->flags),
+                  le16_to_cpu(aq_desc->datalen),
+                  le16_to_cpu(aq_desc->retval));
        i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
-                  aq_desc->cookie_high, aq_desc->cookie_low);
+                  le32_to_cpu(aq_desc->cookie_high),
+                  le32_to_cpu(aq_desc->cookie_low));
        i40e_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
-                  aq_desc->params.internal.param0,
-                  aq_desc->params.internal.param1);
+                  le32_to_cpu(aq_desc->params.internal.param0),
+                  le32_to_cpu(aq_desc->params.internal.param1));
        i40e_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
-                  aq_desc->params.external.addr_high,
-                  aq_desc->params.external.addr_low);
+                  le32_to_cpu(aq_desc->params.external.addr_high),
+                  le32_to_cpu(aq_desc->params.external.addr_low));
 
        if ((buffer != NULL) && (aq_desc->datalen != 0)) {
-               memset(data, 0, sizeof(data));
                i40e_debug(hw, mask, "AQ CMD Buffer:\n");
                if (buf_len < len)
                        len = buf_len;
-               for (i = 0; i < len; i++) {
-                       data[((i % 16) / 4)] |=
-                               ((u32)aq_buffer[i]) << (8 * (i % 4));
-                       if ((i % 16) == 15) {
-                               i40e_debug(hw, mask,
-                                          "\t0x%04X  %08X %08X %08X %08X\n",
-                                          i - 15, data[0], data[1], data[2],
-                                          data[3]);
-                               memset(data, 0, sizeof(data));
-                       }
+               /* write the full 16-byte chunks */
+               for (i = 0; i < (len - 16); i += 16)
+                       i40e_debug(hw, mask,
+                                  "\t0x%04X  %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+                                  i, buf[i], buf[i + 1], buf[i + 2],
+                                  buf[i + 3], buf[i + 4], buf[i + 5],
+                                  buf[i + 6], buf[i + 7], buf[i + 8],
+                                  buf[i + 9], buf[i + 10], buf[i + 11],
+                                  buf[i + 12], buf[i + 13], buf[i + 14],
+                                  buf[i + 15]);
+               /* write whatever's left over without overrunning the buffer */
+               if (i < len) {
+                       char d_buf[80];
+                       int j = 0;
+
+                       memset(d_buf, 0, sizeof(d_buf));
+                       j += sprintf(d_buf, "\t0x%04X ", i);
+                       while (i < len)
+                               j += sprintf(&d_buf[j], " %02X", buf[i++]);
+                       i40e_debug(hw, mask, "%s\n", d_buf);
                }
-               if ((i % 16) != 0)
-                       i40e_debug(hw, mask, "\t0x%04X  %08X %08X %08X %08X\n",
-                                  i - (i % 16), data[0], data[1], data[2],
-                                  data[3]);
        }
 }
 
@@ -535,7 +543,6 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
        I40E_PTT_UNUSED_ENTRY(255)
 };
 
-
 /**
  * i40e_aq_send_msg_to_pf
  * @hw: pointer to the hardware structure
index 9173834825ac4cc1bbe5c3ecbc85e77db693c76f..58e37a44b80a10233f00d004ee8fb9f5d496c12e 100644 (file)
@@ -59,8 +59,7 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
 void i40e_idle_aq(struct i40e_hw *hw);
 void i40evf_resume_aq(struct i40e_hw *hw);
 bool i40evf_check_asq_alive(struct i40e_hw *hw);
-i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
-                                            bool unloading);
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
 
 i40e_status i40e_set_mac_type(struct i40e_hw *hw);
 
index c1f6a59bfea017cb9bcd064f238d5d08a415fb30..3cc737629bf74030c6b4b824eee5905706d37eac 100644 (file)
 #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
 #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
 #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
 #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
 #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
 #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
 #define I40E_GLGEN_RTRIG_CORER_SHIFT 0
 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
 #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
 #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
 #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_VFINT_ITRN_MAX_INDEX 2
 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
 #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
 #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
 #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
 #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
 #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
 #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
 #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
 #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
 #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
 #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
 #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_CRCERRS_MAX_INDEX 3
 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
 #define I40E_GLPRT_TDOLD_MAX_INDEX 3
 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDPC_MAX_INDEX 3
-#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
-#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
 #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPRCH_MAX_INDEX 3
 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
 #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
 #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
 #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
 #define I40E_VFINT_ITRN1_MAX_INDEX 2
 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
 #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
index 708891571dae328299e2b31e83cbf40b42726473..e2ddb30e96f594a210470b56621cf2ac3f19a0d0 100644 (file)
@@ -25,6 +25,7 @@
  ******************************************************************************/
 
 #include <linux/prefetch.h>
+#include <net/busy_poll.h>
 
 #include "i40evf.h"
 #include "i40e_prototype.h"
@@ -288,6 +289,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                }
 
+               prefetch(tx_desc);
+
                /* update budget accounting */
                budget--;
        } while (likely(budget));
@@ -368,6 +371,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
        u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+                 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
                  I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
                  I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
                  /* allow 00 to be written to the index */
@@ -529,6 +533,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
        if (!rx_ring->rx_bi)
                return;
 
+       if (ring_is_ps_enabled(rx_ring)) {
+               int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
+
+               rx_bi = &rx_ring->rx_bi[0];
+               if (rx_bi->hdr_buf) {
+                       dma_free_coherent(dev,
+                                         bufsz,
+                                         rx_bi->hdr_buf,
+                                         rx_bi->dma);
+                       for (i = 0; i < rx_ring->count; i++) {
+                               rx_bi = &rx_ring->rx_bi[i];
+                               rx_bi->dma = 0;
+                               rx_bi->hdr_buf = NULL;
+                       }
+               }
+       }
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                rx_bi = &rx_ring->rx_bi[i];
@@ -586,6 +606,37 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
        }
 }
 
+/**
+ * i40evf_alloc_rx_headers - allocate rx header buffers
+ * @rx_ring: ring to alloc buffers
+ *
+ * Allocate rx header buffers for the entire ring. As these are static,
+ * this is only called when setting up a new ring.
+ **/
+void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring)
+{
+       struct device *dev = rx_ring->dev;
+       struct i40e_rx_buffer *rx_bi;
+       dma_addr_t dma;
+       void *buffer;
+       int buf_size;
+       int i;
+
+       if (rx_ring->rx_bi[0].hdr_buf)
+               return;
+       /* Make sure the buffers don't cross cache line boundaries. */
+       buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
+       buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
+                                   &dma, GFP_KERNEL);
+       if (!buffer)
+               return;
+       for (i = 0; i < rx_ring->count; i++) {
+               rx_bi = &rx_ring->rx_bi[i];
+               rx_bi->dma = dma + (i * buf_size);
+               rx_bi->hdr_buf = buffer + (i * buf_size);
+       }
+}
+
 /**
  * i40evf_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
@@ -646,11 +697,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 }
 
 /**
- * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+       u16 i = rx_ring->next_to_use;
+       union i40e_rx_desc *rx_desc;
+       struct i40e_rx_buffer *bi;
+
+       /* do nothing if no valid netdev defined */
+       if (!rx_ring->netdev || !cleaned_count)
+               return;
+
+       while (cleaned_count--) {
+               rx_desc = I40E_RX_DESC(rx_ring, i);
+               bi = &rx_ring->rx_bi[i];
+
+               if (bi->skb) /* desc is in use */
+                       goto no_buffers;
+               if (!bi->page) {
+                       bi->page = alloc_page(GFP_ATOMIC);
+                       if (!bi->page) {
+                               rx_ring->rx_stats.alloc_page_failed++;
+                               goto no_buffers;
+                       }
+               }
+
+               if (!bi->page_dma) {
+                       /* use a half page if we're re-using */
+                       bi->page_offset ^= PAGE_SIZE / 2;
+                       bi->page_dma = dma_map_page(rx_ring->dev,
+                                                   bi->page,
+                                                   bi->page_offset,
+                                                   PAGE_SIZE / 2,
+                                                   DMA_FROM_DEVICE);
+                       if (dma_mapping_error(rx_ring->dev,
+                                             bi->page_dma)) {
+                               rx_ring->rx_stats.alloc_page_failed++;
+                               bi->page_dma = 0;
+                               goto no_buffers;
+                       }
+               }
+
+               dma_sync_single_range_for_device(rx_ring->dev,
+                                                bi->dma,
+                                                0,
+                                                rx_ring->rx_hdr_len,
+                                                DMA_FROM_DEVICE);
+               /* Refresh the desc even if buffer_addrs didn't change
+                * because each write-back erases this info.
+                */
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+               rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+               i++;
+               if (i == rx_ring->count)
+                       i = 0;
+       }
+
+no_buffers:
+       if (rx_ring->next_to_use != i)
+               i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
  **/
-void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
        u16 i = rx_ring->next_to_use;
        union i40e_rx_desc *rx_desc;
@@ -690,40 +806,8 @@ void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
                        }
                }
 
-               if (ring_is_ps_enabled(rx_ring)) {
-                       if (!bi->page) {
-                               bi->page = alloc_page(GFP_ATOMIC);
-                               if (!bi->page) {
-                                       rx_ring->rx_stats.alloc_page_failed++;
-                                       goto no_buffers;
-                               }
-                       }
-
-                       if (!bi->page_dma) {
-                               /* use a half page if we're re-using */
-                               bi->page_offset ^= PAGE_SIZE / 2;
-                               bi->page_dma = dma_map_page(rx_ring->dev,
-                                                           bi->page,
-                                                           bi->page_offset,
-                                                           PAGE_SIZE / 2,
-                                                           DMA_FROM_DEVICE);
-                               if (dma_mapping_error(rx_ring->dev,
-                                                     bi->page_dma)) {
-                                       rx_ring->rx_stats.alloc_page_failed++;
-                                       bi->page_dma = 0;
-                                       goto no_buffers;
-                               }
-                       }
-
-                       /* Refresh the desc even if buffer_addrs didn't change
-                        * because each write-back erases this info.
-                        */
-                       rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
-                       rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-               } else {
-                       rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-                       rx_desc->read.hdr_addr = 0;
-               }
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+               rx_desc->read.hdr_addr = 0;
                i++;
                if (i == rx_ring->count)
                        i = 0;
@@ -777,10 +861,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        struct iphdr *iph;
        __sum16 csum;
 
-       ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
-                     (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
-       ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
-                     (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+       ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+                    (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+       ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+                    (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
 
        skb->ip_summed = CHECKSUM_NONE;
 
@@ -831,9 +915,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
         * so the total length of IPv4 header is IHL*4 bytes
         * The UDP_0 bit *may* bet set if the *inner* header is UDP
         */
-       if (ipv4_tunnel &&
-           (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
-           !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+       if (ipv4_tunnel) {
                skb->transport_header = skb->mac_header +
                                        sizeof(struct ethhdr) +
                                        (ip_hdr(skb)->ihl * 4);
@@ -843,15 +925,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                          skb->protocol == htons(ETH_P_8021AD))
                                          ? VLAN_HLEN : 0;
 
-               rx_udp_csum = udp_csum(skb);
-               iph = ip_hdr(skb);
-               csum = csum_tcpudp_magic(
-                               iph->saddr, iph->daddr,
-                               (skb->len - skb_transport_offset(skb)),
-                               IPPROTO_UDP, rx_udp_csum);
+               if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+                   (udp_hdr(skb)->check != 0)) {
+                       rx_udp_csum = udp_csum(skb);
+                       iph = ip_hdr(skb);
+                       csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                (skb->len -
+                                                 skb_transport_offset(skb)),
+                                                IPPROTO_UDP, rx_udp_csum);
+
+                       if (udp_hdr(skb)->check != csum)
+                               goto checksum_fail;
 
-               if (udp_hdr(skb)->check != csum)
-                       goto checksum_fail;
+               } /* else its GRE and so no outer UDP header */
        }
 
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -906,13 +992,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
 }
 
 /**
- * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
  * @rx_ring:  rx ring to clean
  * @budget:   how many cleans we're allowed
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
@@ -925,20 +1011,49 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        u8 rx_ptype;
        u64 qword;
 
-       rx_desc = I40E_RX_DESC(rx_ring, i);
-       qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-       rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-                   I40E_RXD_QW1_STATUS_SHIFT;
-
-       while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
-               union i40e_rx_desc *next_rxd;
+       do {
                struct i40e_rx_buffer *rx_bi;
                struct sk_buff *skb;
                u16 vlan_tag;
+               /* return some buffers to hardware, one at a time is too slow */
+               if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+                       i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+                       cleaned_count = 0;
+               }
+
+               i = rx_ring->next_to_clean;
+               rx_desc = I40E_RX_DESC(rx_ring, i);
+               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+               rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+                       I40E_RXD_QW1_STATUS_SHIFT;
+
+               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+                       break;
+
+               /* This memory barrier is needed to keep us from reading
+                * any other fields out of the rx_desc until we know the
+                * DD bit is set.
+                */
+               rmb();
                rx_bi = &rx_ring->rx_bi[i];
                skb = rx_bi->skb;
-               prefetch(skb->data);
+               if (likely(!skb)) {
+                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                                       rx_ring->rx_hdr_len);
+                       if (!skb) {
+                               rx_ring->rx_stats.alloc_buff_failed++;
+                               break;
+                       }
 
+                       /* initialize queue mapping */
+                       skb_record_rx_queue(skb, rx_ring->queue_index);
+                       /* we are reusing so sync this buffer for CPU use */
+                       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                                     rx_bi->dma,
+                                                     0,
+                                                     rx_ring->rx_hdr_len,
+                                                     DMA_FROM_DEVICE);
+               }
                rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
                                I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
                rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
@@ -953,40 +1068,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
+               prefetch(rx_bi->page);
                rx_bi->skb = NULL;
-
-               /* This memory barrier is needed to keep us from reading
-                * any other fields out of the rx_desc until we know the
-                * STATUS_DD bit is set
-                */
-               rmb();
-
-               /* Get the header and possibly the whole packet
-                * If this is an skb from previous receive dma will be 0
-                */
-               if (rx_bi->dma) {
-                       u16 len;
-
+               cleaned_count++;
+               if (rx_hbo || rx_sph) {
+                       int len;
                        if (rx_hbo)
                                len = I40E_RX_HDR_SIZE;
-                       else if (rx_sph)
-                               len = rx_header_len;
-                       else if (rx_packet_len)
-                               len = rx_packet_len;   /* 1buf/no split found */
                        else
-                               len = rx_header_len;   /* split always mode */
-
-                       skb_put(skb, len);
-                       dma_unmap_single(rx_ring->dev,
-                                        rx_bi->dma,
-                                        rx_ring->rx_buf_len,
-                                        DMA_FROM_DEVICE);
-                       rx_bi->dma = 0;
+                               len = rx_header_len;
+                       memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
+               } else if (skb->len == 0) {
+                       int len;
+
+                       len = (rx_packet_len > skb_headlen(skb) ?
+                               skb_headlen(skb) : rx_packet_len);
+                       memcpy(__skb_put(skb, len),
+                              rx_bi->page + rx_bi->page_offset,
+                              len);
+                       rx_bi->page_offset += len;
+                       rx_packet_len -= len;
                }
 
                /* Get the rest of the data if this was a header split */
-               if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
-
+               if (rx_packet_len) {
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_bi->page,
                                           rx_bi->page_offset,
@@ -1008,22 +1113,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                                       DMA_FROM_DEVICE);
                        rx_bi->page_dma = 0;
                }
-               I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+               I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
                    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        struct i40e_rx_buffer *next_buffer;
 
                        next_buffer = &rx_ring->rx_bi[i];
-
-                       if (ring_is_ps_enabled(rx_ring)) {
-                               rx_bi->skb = next_buffer->skb;
-                               rx_bi->dma = next_buffer->dma;
-                               next_buffer->skb = skb;
-                               next_buffer->dma = 0;
-                       }
+                       next_buffer->skb = skb;
                        rx_ring->rx_stats.non_eop_descs++;
-                       goto next_desc;
+                       continue;
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
@@ -1032,7 +1131,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                        /* TODO: shouldn't we increment a counter indicating the
                         * drop?
                         */
-                       goto next_desc;
+                       continue;
                }
 
                skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
@@ -1048,30 +1147,134 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
+#ifdef I40E_FCOE
+               if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
+#endif
+               skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
                i40e_receive_skb(rx_ring, skb, vlan_tag);
 
                rx_ring->netdev->last_rx = jiffies;
-               budget--;
-next_desc:
                rx_desc->wb.qword1.status_error_len = 0;
-               if (!budget)
-                       break;
 
-               cleaned_count++;
+       } while (likely(total_rx_packets < budget));
+
+       u64_stats_update_begin(&rx_ring->syncp);
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       u64_stats_update_end(&rx_ring->syncp);
+       rx_ring->q_vector->rx.total_packets += total_rx_packets;
+       rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+       return total_rx_packets;
+}
+
+/**
+ * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
+ * @rx_ring:  rx ring to clean
+ * @budget:   how many cleans we're allowed
+ *
+ * Returns number of packets cleaned
+ **/
+static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+{
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+       u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+       struct i40e_vsi *vsi = rx_ring->vsi;
+       union i40e_rx_desc *rx_desc;
+       u32 rx_error, rx_status;
+       u16 rx_packet_len;
+       u8 rx_ptype;
+       u64 qword;
+       u16 i;
+
+       do {
+               struct i40e_rx_buffer *rx_bi;
+               struct sk_buff *skb;
+               u16 vlan_tag;
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-                       i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+                       i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }
 
-               /* use prefetched values */
-               rx_desc = next_rxd;
+               i = rx_ring->next_to_clean;
+               rx_desc = I40E_RX_DESC(rx_ring, i);
                qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-                           I40E_RXD_QW1_STATUS_SHIFT;
-       }
+                       I40E_RXD_QW1_STATUS_SHIFT;
+
+               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+                       break;
+
+               /* This memory barrier is needed to keep us from reading
+                * any other fields out of the rx_desc until we know the
+                * DD bit is set.
+                */
+               rmb();
+
+               rx_bi = &rx_ring->rx_bi[i];
+               skb = rx_bi->skb;
+               prefetch(skb->data);
+
+               rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+                               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+
+               rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+                          I40E_RXD_QW1_ERROR_SHIFT;
+               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+                          I40E_RXD_QW1_PTYPE_SHIFT;
+               rx_bi->skb = NULL;
+               cleaned_count++;
+
+               /* Get the header and possibly the whole packet
+                * If this is an skb from previous receive dma will be 0
+                */
+               skb_put(skb, rx_packet_len);
+               dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
+                                DMA_FROM_DEVICE);
+               rx_bi->dma = 0;
+
+               I40E_RX_INCREMENT(rx_ring, i);
+
+               if (unlikely(
+                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                       rx_ring->rx_stats.non_eop_descs++;
+                       continue;
+               }
+
+               /* ERR_MASK will only have valid bits if EOP set */
+               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+                       dev_kfree_skb_any(skb);
+                       /* TODO: shouldn't we increment a counter indicating the
+                        * drop?
+                        */
+                       continue;
+               }
+
+               skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+                            i40e_ptype_to_hash(rx_ptype));
+               /* probably a little skewed due to removing CRC */
+               total_rx_bytes += skb->len;
+               total_rx_packets++;
+
+               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+               i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+                        ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+                        : 0;
+               i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+               rx_ring->netdev->last_rx = jiffies;
+               rx_desc->wb.qword1.status_error_len = 0;
+       } while (likely(total_rx_packets < budget));
 
-       rx_ring->next_to_clean = i;
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
@@ -1079,10 +1282,7 @@ next_desc:
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-       if (cleaned_count)
-               i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
-
-       return budget > 0;
+       return total_rx_packets;
 }
 
 /**
@@ -1103,6 +1303,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
        bool clean_complete = true;
        bool arm_wb = false;
        int budget_per_ring;
+       int cleaned;
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
                napi_complete(napi);
@@ -1122,8 +1323,14 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
         */
        budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
-       i40e_for_each_ring(ring, q_vector->rx)
-               clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
+       i40e_for_each_ring(ring, q_vector->rx) {
+               if (ring_is_ps_enabled(ring))
+                       cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
+               else
+                       cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+               /* if we didn't clean as many as budgeted, we must be done */
+               clean_complete &= (budget_per_ring != cleaned);
+       }
 
        /* If work not completed, return budget and polling will return */
        if (!clean_complete) {
@@ -1163,6 +1370,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
        __be16 protocol = skb->protocol;
        u32  tx_flags = 0;
 
+       if (protocol == htons(ETH_P_8021Q) &&
+           !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+               /* When HW VLAN acceleration is turned off by the user the
+                * stack sets the protocol to 8021q so that the driver
+                * can take any steps required to support the SW only
+                * VLAN handling.  In our case the driver doesn't need
+                * to take any further steps so just set the protocol
+                * to the encapsulated ethertype.
+                */
+               skb->protocol = vlan_get_protocol(skb);
+               goto out;
+       }
+
        /* if we have a HW VLAN tag being added, default to the HW one */
        if (skb_vlan_tag_present(skb)) {
                tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
@@ -1179,6 +1399,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                tx_flags |= I40E_TX_FLAGS_SW_VLAN;
        }
 
+out:
        *flags = tx_flags;
        return 0;
 }
@@ -1262,8 +1483,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
        struct iphdr *this_ip_hdr;
        u32 network_hdr_len;
        u8 l4_hdr = 0;
+       u32 l4_tunnel = 0;
 
        if (skb->encapsulation) {
+               switch (ip_hdr(skb)->protocol) {
+               case IPPROTO_UDP:
+                       l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                       break;
+               default:
+                       return;
+               }
                network_hdr_len = skb_inner_network_header_len(skb);
                this_ip_hdr = inner_ip_hdr(skb);
                this_ipv6_hdr = inner_ipv6_hdr(skb);
@@ -1286,8 +1515,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 
                /* Now set the ctx descriptor fields */
                *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
-                                       I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
-                                  I40E_TXD_CTX_UDP_TUNNELING            |
+                                  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT      |
+                                  l4_tunnel                             |
                                   ((skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
index c950a038237c2c63dc66b9bf7be887556b80a5de..1e49bb1fbac1f0de59444626cc9645b72aeac0da 100644 (file)
@@ -96,6 +96,14 @@ enum i40e_dyn_idx_t {
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define I40E_RX_BUFFER_WRITE   16      /* Must be power of 2 */
+#define I40E_RX_INCREMENT(r, i) \
+       do {                                    \
+               (i)++;                          \
+               if ((i) == (r)->count)          \
+                       i = 0;                  \
+               r->next_to_clean = i;           \
+       } while (0)
+
 #define I40E_RX_NEXT_DESC(r, i, n)             \
        do {                                    \
                (i)++;                          \
@@ -151,6 +159,7 @@ struct i40e_tx_buffer {
 
 struct i40e_rx_buffer {
        struct sk_buff *skb;
+       void *hdr_buf;
        dma_addr_t dma;
        struct page *page;
        dma_addr_t page_dma;
@@ -223,8 +232,8 @@ struct i40e_ring {
        u16 rx_buf_len;
        u8  dtype;
 #define I40E_RX_DTYPE_NO_SPLIT      0
-#define I40E_RX_DTYPE_SPLIT_ALWAYS  1
-#define I40E_RX_DTYPE_HEADER_SPLIT  2
+#define I40E_RX_DTYPE_HEADER_SPLIT  1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
        u8  hsplit;
 #define I40E_RX_SPLIT_L2      0x1
 #define I40E_RX_SPLIT_IP      0x2
@@ -278,7 +287,9 @@ struct i40e_ring_container {
 #define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
 void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
index 3d0fdaab5cc8404e242b8ecfb0e12e12198f41cc..9c79cb6abb2b170955a9af27c73a42f4504f9c1a 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -44,7 +44,8 @@
 #define I40E_DEV_ID_QSFP_B             0x1584
 #define I40E_DEV_ID_QSFP_C             0x1585
 #define I40E_DEV_ID_10G_BASE_T         0x1586
-#define I40E_DEV_ID_VF         0x154C
+#define I40E_DEV_ID_20G_KR2            0x1587
+#define I40E_DEV_ID_VF                 0x154C
 #define I40E_DEV_ID_VF_HV              0x1571
 
 #define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
@@ -175,12 +176,12 @@ struct i40e_link_status {
        u8 an_info;
        u8 ext_info;
        u8 loopback;
-       bool an_enabled;
        /* is Link Status Event notification to SW enabled */
        bool lse_enable;
        u16 max_frame_size;
        bool crc_enable;
        u8 pacing;
+       u8 requested_speeds;
 };
 
 struct i40e_phy_info {
@@ -1116,7 +1117,7 @@ struct i40e_hw_port_stats {
 /* Checksum and Shadow RAM pointers */
 #define I40E_SR_NVM_CONTROL_WORD               0x00
 #define I40E_SR_EMP_MODULE_PTR                 0x0F
-#define I40E_SR_NVM_IMAGE_VERSION              0x18
+#define I40E_SR_NVM_DEV_STARTER_VERSION                0x18
 #define I40E_SR_NVM_WAKE_ON_LAN                        0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR  0x27
 #define I40E_SR_NVM_EETRACK_LO                 0x2D
index e0c8208138f4e45171bf47863b225e53dd994b27..59f62f0e65dd3ecaf230d2aa433452cc68bfa8d2 100644 (file)
  * of the virtchnl_msg structure.
  */
 enum i40e_virtchnl_ops {
-/* VF sends req. to pf for the following
- * ops.
+/* The PF sends status change events to VFs using
+ * the I40E_VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
  */
        I40E_VIRTCHNL_OP_UNKNOWN = 0,
        I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
-       I40E_VIRTCHNL_OP_RESET_VF,
-       I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
-       I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
-       I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
-       I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
-       I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
-       I40E_VIRTCHNL_OP_ENABLE_QUEUES,
-       I40E_VIRTCHNL_OP_DISABLE_QUEUES,
-       I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
-       I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
-       I40E_VIRTCHNL_OP_ADD_VLAN,
-       I40E_VIRTCHNL_OP_DEL_VLAN,
-       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
-       I40E_VIRTCHNL_OP_GET_STATS,
-       I40E_VIRTCHNL_OP_FCOE,
-       I40E_VIRTCHNL_OP_CONFIG_RSS,
-/* PF sends status change events to vfs using
- * the following op.
- */
-       I40E_VIRTCHNL_OP_EVENT,
+       I40E_VIRTCHNL_OP_RESET_VF = 2,
+       I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+       I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+       I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+       I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+       I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+       I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
+       I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
+       I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
+       I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
+       I40E_VIRTCHNL_OP_ADD_VLAN = 12,
+       I40E_VIRTCHNL_OP_DEL_VLAN = 13,
+       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+       I40E_VIRTCHNL_OP_GET_STATS = 15,
+       I40E_VIRTCHNL_OP_FCOE = 16,
+       I40E_VIRTCHNL_OP_EVENT = 17,
+       I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
index 981224743c73e472791136fea44bef6d02aff965..34c8565031f62e9a612b0c5c3d8a364a78fcf929 100644 (file)
@@ -272,6 +272,8 @@ void i40evf_update_stats(struct i40evf_adapter *adapter);
 void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
 int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
 void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
+void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
 
 void i40e_napi_add_all(struct i40evf_adapter *adapter);
 void i40e_napi_del_all(struct i40evf_adapter *adapter);
index 69b97bac182ce763eebb7b93fcb93fa6a55d0178..f4e77665bc54b9058c85c2c8e62add23fa49b9b8 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -29,7 +29,6 @@
 
 #include <linux/uaccess.h>
 
-
 struct i40evf_stats {
        char stat_string[ETH_GSTRING_LEN];
        int stat_offset;
@@ -180,7 +179,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev)
 }
 
 /**
- * i40evf_get_msglevel - Set debug message level
+ * i40evf_set_msglevel - Set debug message level
  * @netdev: network interface device structure
  * @data: message level
  *
@@ -191,6 +190,8 @@ static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
 {
        struct i40evf_adapter *adapter = netdev_priv(netdev);
 
+       if (I40E_DEBUG_USER & data)
+               adapter->hw.debug_mask = data;
        adapter->msg_enable = data;
 }
 
@@ -208,7 +209,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
 
        strlcpy(drvinfo->driver, i40evf_driver_name, 32);
        strlcpy(drvinfo->version, i40evf_driver_version, 32);
-
+       strlcpy(drvinfo->fw_version, "N/A", 4);
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
 }
 
@@ -640,12 +641,14 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
        if (!indir)
                return 0;
 
-       for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
-               hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
-               indir[j++] = hlut_val & 0xff;
-               indir[j++] = (hlut_val >> 8) & 0xff;
-               indir[j++] = (hlut_val >> 16) & 0xff;
-               indir[j++] = (hlut_val >> 24) & 0xff;
+       if (indir) {
+               for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+                       hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+                       indir[j++] = hlut_val & 0xff;
+                       indir[j++] = (hlut_val >> 8) & 0xff;
+                       indir[j++] = (hlut_val >> 16) & 0xff;
+                       indir[j++] = (hlut_val >> 24) & 0xff;
+               }
        }
        return 0;
 }
index 8d8c201c63c1cde7f798e657bcbfc15cd9f8858c..6d5f3b21c68a9939fa33f1c8ea7bd59948c22741 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
 #include "i40e_prototype.h"
 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
-static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
-static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
 static int i40evf_close(struct net_device *netdev);
 
 char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
        "Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.2.0"
+#define DRV_VERSION "1.2.25"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -244,6 +242,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
                if (mask & (1 << (i - 1))) {
                        wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
                             I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+                            I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
                             I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
                }
        }
@@ -263,6 +262,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
        if (mask & 1) {
                dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
                dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+                          I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
                           I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
                wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
        }
@@ -270,6 +270,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
                if (mask & (1 << i)) {
                        dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
                        dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+                                  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
                                   I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
                        wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
                }
@@ -524,7 +525,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
        int err;
 
        snprintf(adapter->misc_vector_name,
-                sizeof(adapter->misc_vector_name) - 1, "i40evf:mbx");
+                sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
+                dev_name(&adapter->pdev->dev));
        err = request_irq(adapter->msix_entries[0].vector,
                          &i40evf_msix_aq, 0,
                          adapter->misc_vector_name, netdev);
@@ -662,13 +664,21 @@ i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
 static struct
 i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
 {
-       struct i40evf_vlan_filter *f;
+       struct i40evf_vlan_filter *f = NULL;
+       int count = 50;
+
+       while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+                               &adapter->crit_section)) {
+               udelay(1);
+               if (--count == 0)
+                       goto out;
+       }
 
        f = i40evf_find_vlan(adapter, vlan);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
-                       return NULL;
+                       goto clearout;
 
                f->vlan = vlan;
 
@@ -678,6 +688,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
                adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
        }
 
+clearout:
+       clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+out:
        return f;
 }
 
@@ -689,12 +702,21 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
 static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
 {
        struct i40evf_vlan_filter *f;
+       int count = 50;
+
+       while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+                               &adapter->crit_section)) {
+               udelay(1);
+               if (--count == 0)
+                       return;
+       }
 
        f = i40evf_find_vlan(adapter, vlan);
        if (f) {
                f->remove = true;
                adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
        }
+       clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 }
 
 /**
@@ -761,13 +783,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
                                     u8 *macaddr)
 {
        struct i40evf_mac_filter *f;
+       int count = 50;
 
        if (!macaddr)
                return NULL;
 
        while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-                               &adapter->crit_section))
+                               &adapter->crit_section)) {
                udelay(1);
+               if (--count == 0)
+                       return NULL;
+       }
 
        f = i40evf_find_filter(adapter, macaddr);
        if (!f) {
@@ -828,6 +854,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
        struct i40evf_mac_filter *f, *ftmp;
        struct netdev_hw_addr *uca;
        struct netdev_hw_addr *mca;
+       int count = 50;
 
        /* add addr if not already in the filter list */
        netdev_for_each_uc_addr(uca, netdev) {
@@ -838,8 +865,14 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
        }
 
        while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-                               &adapter->crit_section))
+                               &adapter->crit_section)) {
                udelay(1);
+               if (--count == 0) {
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to get lock in %s\n", __func__);
+                       return;
+               }
+       }
        /* remove filter if not in netdev list */
        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
                bool found = false;
@@ -920,7 +953,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
        for (i = 0; i < adapter->num_active_queues; i++) {
                struct i40e_ring *ring = adapter->rx_rings[i];
 
-               i40evf_alloc_rx_buffers(ring, ring->count);
+               i40evf_alloc_rx_buffers_1buf(ring, ring->count);
                ring->next_to_use = ring->count - 1;
                writel(ring->next_to_use, ring->tail);
        }
@@ -958,6 +991,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
                                &adapter->crit_section))
                usleep_range(500, 1000);
 
+       netif_carrier_off(netdev);
+       netif_tx_disable(netdev);
+       i40evf_napi_disable_all(adapter);
        i40evf_irq_disable(adapter);
 
        /* remove all MAC filters */
@@ -981,15 +1017,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
                adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
                adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
        }
-       netif_tx_disable(netdev);
-
-       netif_tx_stop_all_queues(netdev);
-
-       i40evf_napi_disable_all(adapter);
-
-       msleep(20);
 
-       netif_carrier_off(netdev);
        clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 }
 
@@ -1344,6 +1372,11 @@ static void i40evf_watchdog_task(struct work_struct *work)
                goto watchdog_done;
        }
 
+       if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
+               i40evf_disable_queues(adapter);
+               goto watchdog_done;
+       }
+
        if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
                i40evf_map_queues(adapter);
                goto watchdog_done;
@@ -1369,11 +1402,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
                goto watchdog_done;
        }
 
-       if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
-               i40evf_disable_queues(adapter);
-               goto watchdog_done;
-       }
-
        if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
                i40evf_configure_queues(adapter);
                goto watchdog_done;
@@ -1407,41 +1435,22 @@ restart_watchdog:
 }
 
 /**
- * next_queue - increment to next available tx queue
- * @adapter: board private structure
- * @j: queue counter
- *
- * Helper function for RSS programming to increment through available
- * queus. Returns the next queue value.
- **/
-static int next_queue(struct i40evf_adapter *adapter, int j)
-{
-       j += 1;
-
-       return j >= adapter->num_active_queues ? 0 : j;
-}
-
-/**
- * i40evf_configure_rss - Prepare for RSS if used
+ * i40evf_configure_rss - Prepare for RSS
  * @adapter: board private structure
  **/
 static void i40evf_configure_rss(struct i40evf_adapter *adapter)
 {
        u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
        struct i40e_hw *hw = &adapter->hw;
+       u32 cqueue = 0;
        u32 lut = 0;
        int i, j;
        u64 hena;
 
-       /* No RSS for single queue. */
-       if (adapter->num_active_queues == 1) {
-               wr32(hw, I40E_VFQF_HENA(0), 0);
-               wr32(hw, I40E_VFQF_HENA(1), 0);
-               return;
-       }
-
        /* Hash type is configured by the PF - we just supply the key */
        netdev_rss_key_fill(rss_key, sizeof(rss_key));
+
+       /* Fill out hash function seed */
        for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
                wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
 
@@ -1451,16 +1460,14 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
        wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
 
        /* Populate the LUT with max no. of queues in round robin fashion */
-       j = adapter->num_active_queues;
        for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
-               j = next_queue(adapter, j);
-               lut = j;
-               j = next_queue(adapter, j);
-               lut |= j << 8;
-               j = next_queue(adapter, j);
-               lut |= j << 16;
-               j = next_queue(adapter, j);
-               lut |= j << 24;
+               lut = 0;
+               for (j = 0; j < 4; j++) {
+                       if (cqueue == adapter->vsi_res->num_queue_pairs)
+                               cqueue = 0;
+                       lut |= ((cqueue) << (8 * j));
+                       cqueue++;
+               }
                wr32(hw, I40E_VFQF_HLUT(i), lut);
        }
        i40e_flush(hw);
@@ -1481,9 +1488,11 @@ static void i40evf_reset_task(struct work_struct *work)
        struct i40evf_adapter *adapter = container_of(work,
                                                      struct i40evf_adapter,
                                                      reset_task);
+       struct net_device *netdev = adapter->netdev;
        struct i40e_hw *hw = &adapter->hw;
-       int i = 0, err;
+       struct i40evf_mac_filter *f;
        uint32_t rstat_val;
+       int i = 0, err;
 
        while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
                                &adapter->crit_section))
@@ -1528,7 +1537,11 @@ static void i40evf_reset_task(struct work_struct *work)
 
                if (netif_running(adapter->netdev)) {
                        set_bit(__I40E_DOWN, &adapter->vsi.state);
-                       i40evf_down(adapter);
+                       i40evf_irq_disable(adapter);
+                       i40evf_napi_disable_all(adapter);
+                       netif_tx_disable(netdev);
+                       netif_tx_stop_all_queues(netdev);
+                       netif_carrier_off(netdev);
                        i40evf_free_traffic_irqs(adapter);
                        i40evf_free_all_tx_resources(adapter);
                        i40evf_free_all_rx_resources(adapter);
@@ -1560,22 +1573,38 @@ static void i40evf_reset_task(struct work_struct *work)
 continue_reset:
        adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
 
-       i40evf_down(adapter);
+       i40evf_irq_disable(adapter);
+
+       if (netif_running(adapter->netdev)) {
+               i40evf_napi_disable_all(adapter);
+               netif_tx_disable(netdev);
+               netif_tx_stop_all_queues(netdev);
+               netif_carrier_off(netdev);
+       }
+
        adapter->state = __I40EVF_RESETTING;
 
        /* kill and reinit the admin queue */
        if (i40evf_shutdown_adminq(hw))
-               dev_warn(&adapter->pdev->dev,
-                        "%s: Failed to destroy the Admin Queue resources\n",
-                        __func__);
+               dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n");
+       adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
        err = i40evf_init_adminq(hw);
        if (err)
-               dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
-                        __func__, err);
+               dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
+                        err);
 
-       adapter->aq_pending = 0;
-       adapter->aq_required = 0;
        i40evf_map_queues(adapter);
+
+       /* re-add all MAC filters */
+       list_for_each_entry(f, &adapter->mac_filter_list, list) {
+               f->add = true;
+       }
+       /* re-add all VLAN filters */
+       list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+               f->add = true;
+       }
+       adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+       adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
        clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 
        mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -1688,7 +1717,7 @@ out:
  *
  * Free all transmit software resources
  **/
-static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
+void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
 {
        int i;
 
@@ -1758,7 +1787,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
  *
  * Free all receive software resources
  **/
-static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
+void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
 {
        int i;
 
@@ -1788,7 +1817,7 @@ static int i40evf_open(struct net_device *netdev)
                dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
                return -EIO;
        }
-       if (adapter->state != __I40EVF_DOWN)
+       if (adapter->state != __I40EVF_DOWN || adapter->aq_required)
                return -EBUSY;
 
        /* allocate transmit descriptors */
@@ -1852,9 +1881,6 @@ static int i40evf_close(struct net_device *netdev)
        adapter->state = __I40EVF_DOWN;
        i40evf_free_traffic_irqs(adapter);
 
-       i40evf_free_all_tx_resources(adapter);
-       i40evf_free_all_rx_resources(adapter);
-
        return 0;
 }
 
@@ -1977,7 +2003,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
  *
  * This task completes the work that was begun in probe. Due to the nature
  * of VF-PF communications, we may need to wait tens of milliseconds to get
- * reponses back from the PF. Rather than busy-wait in probe and bog down the
+ * responses back from the PF. Rather than busy-wait in probe and bog down the
  * whole system, we'll do it in a task so we can sleep.
  * This task only runs during driver init. Once we've established
  * communications with the PF driver and set up our netdev, the watchdog
@@ -2368,7 +2394,7 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
 }
 
 /**
- * i40evf_resume - Power managment resume routine
+ * i40evf_resume - Power management resume routine
  * @pdev: PCI device information struct
  *
  * Called when the system (VM) is resumed from sleep/suspend.
@@ -2468,6 +2494,8 @@ static void i40evf_remove(struct pci_dev *pdev)
        iounmap(hw->hw_addr);
        pci_release_regions(pdev);
 
+       i40evf_free_all_tx_resources(adapter);
+       i40evf_free_all_rx_resources(adapter);
        i40evf_free_queues(adapter);
        kfree(adapter->vf_res);
        /* If we got removed before an up/down sequence, we've got a filter
index 3f0c85ecbca68c997aeec7bd53f66bc7c1736d95..4240a496dc502193242be05561135d25b3598f66 100644 (file)
@@ -761,6 +761,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                break;
        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
                adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
+               i40evf_free_all_tx_resources(adapter);
+               i40evf_free_all_rx_resources(adapter);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
                adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
index f366b3b96d03db4cd54351bad599d425520d4df7..8457d0306e3a76107c18ed524a3000d47b3ead6e 100644 (file)
@@ -1776,6 +1776,7 @@ void igb_down(struct igb_adapter *adapter)
        wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */
 
+       netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);
 
        /* disable transmits in the hardware */
@@ -1797,12 +1798,9 @@ void igb_down(struct igb_adapter *adapter)
                }
        }
 
-
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
 
-       netif_carrier_off(netdev);
-
        /* record the stats before reset*/
        spin_lock(&adapter->stats64_lock);
        igb_update_stats(adapter, &adapter->stats64);
@@ -2095,6 +2093,7 @@ static const struct net_device_ops igb_netdev_ops = {
 #endif
        .ndo_fix_features       = igb_fix_features,
        .ndo_set_features       = igb_set_features,
+       .ndo_features_check     = passthru_features_check,
 };
 
 /**
index d20fc8ed11f1574a2ae0fa4649be23fe204e1308..e3b9b63ad01083cb987429f57c9ebef84d86f4db 100644 (file)
@@ -30,7 +30,7 @@
  *
  * Neither the 82576 nor the 82580 offer registers wide enough to hold
  * nanoseconds time values for very long. For the 82580, SYSTIM always
- * counts nanoseconds, but the upper 24 bits are not availible. The
+ * counts nanoseconds, but the upper 24 bits are not available. The
  * frequency is adjusted by changing the 32 bit fractional nanoseconds
  * register, TIMINCA.
  *
@@ -116,7 +116,8 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
 }
 
 /* SYSTIM read access for I210/I211 */
-static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
+static void igb_ptp_read_i210(struct igb_adapter *adapter,
+                             struct timespec64 *ts)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 sec, nsec;
@@ -134,7 +135,7 @@ static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
 }
 
 static void igb_ptp_write_i210(struct igb_adapter *adapter,
-                              const struct timespec *ts)
+                              const struct timespec64 *ts)
 {
        struct e1000_hw *hw = &adapter->hw;
 
@@ -269,13 +270,13 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
        struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
                                               ptp_caps);
        unsigned long flags;
-       struct timespec now, then = ns_to_timespec(delta);
+       struct timespec64 now, then = ns_to_timespec64(delta);
 
        spin_lock_irqsave(&igb->tmreg_lock, flags);
 
        igb_ptp_read_i210(igb, &now);
-       now = timespec_add(now, then);
-       igb_ptp_write_i210(igb, (const struct timespec *)&now);
+       now = timespec64_add(now, then);
+       igb_ptp_write_i210(igb, (const struct timespec64 *)&now);
 
        spin_unlock_irqrestore(&igb->tmreg_lock, flags);
 
@@ -283,13 +284,12 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
 }
 
 static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
-                                struct timespec *ts)
+                                struct timespec64 *ts)
 {
        struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
                                               ptp_caps);
        unsigned long flags;
        u64 ns;
-       u32 remainder;
 
        spin_lock_irqsave(&igb->tmreg_lock, flags);
 
@@ -297,14 +297,13 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
 
        spin_unlock_irqrestore(&igb->tmreg_lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
 
        return 0;
 }
 
 static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
-                               struct timespec *ts)
+                               struct timespec64 *ts)
 {
        struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
                                               ptp_caps);
@@ -320,15 +319,14 @@ static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
 }
 
 static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
-                                const struct timespec *ts)
+                                const struct timespec64 *ts)
 {
        struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
                                               ptp_caps);
        unsigned long flags;
        u64 ns;
 
-       ns = ts->tv_sec * 1000000000ULL;
-       ns += ts->tv_nsec;
+       ns = timespec64_to_ns(ts);
 
        spin_lock_irqsave(&igb->tmreg_lock, flags);
 
@@ -340,7 +338,7 @@ static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
 }
 
 static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
-                               const struct timespec *ts)
+                               const struct timespec64 *ts)
 {
        struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
                                               ptp_caps);
@@ -358,7 +356,7 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
 static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
 {
        u32 *ptr = pin < 2 ? ctrl : ctrl_ext;
-       u32 mask[IGB_N_SDP] = {
+       static const u32 mask[IGB_N_SDP] = {
                E1000_CTRL_SDP0_DIR,
                E1000_CTRL_SDP1_DIR,
                E1000_CTRL_EXT_SDP2_DIR,
@@ -373,16 +371,16 @@ static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
 
 static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
 {
-       struct e1000_hw *hw = &igb->hw;
-       u32 aux0_sel_sdp[IGB_N_SDP] = {
+       static const u32 aux0_sel_sdp[IGB_N_SDP] = {
                AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
        };
-       u32 aux1_sel_sdp[IGB_N_SDP] = {
+       static const u32 aux1_sel_sdp[IGB_N_SDP] = {
                AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
        };
-       u32 ts_sdp_en[IGB_N_SDP] = {
+       static const u32 ts_sdp_en[IGB_N_SDP] = {
                TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
        };
+       struct e1000_hw *hw = &igb->hw;
        u32 ctrl, ctrl_ext, tssdp = 0;
 
        ctrl = rd32(E1000_CTRL);
@@ -409,28 +407,28 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
 
 static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
 {
-       struct e1000_hw *hw = &igb->hw;
-       u32 aux0_sel_sdp[IGB_N_SDP] = {
+       static const u32 aux0_sel_sdp[IGB_N_SDP] = {
                AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
        };
-       u32 aux1_sel_sdp[IGB_N_SDP] = {
+       static const u32 aux1_sel_sdp[IGB_N_SDP] = {
                AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
        };
-       u32 ts_sdp_en[IGB_N_SDP] = {
+       static const u32 ts_sdp_en[IGB_N_SDP] = {
                TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
        };
-       u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
+       static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
                TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0,
                TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0,
        };
-       u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
+       static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
                TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
                TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
        };
-       u32 ts_sdp_sel_clr[IGB_N_SDP] = {
+       static const u32 ts_sdp_sel_clr[IGB_N_SDP] = {
                TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
                TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
        };
+       struct e1000_hw *hw = &igb->hw;
        u32 ctrl, ctrl_ext, tssdp = 0;
 
        ctrl = rd32(E1000_CTRL);
@@ -468,7 +466,7 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
        u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
        unsigned long flags;
        struct timespec ts;
-       int pin;
+       int pin = -1;
        s64 ns;
 
        switch (rq->type) {
@@ -627,11 +625,12 @@ static void igb_ptp_overflow_check(struct work_struct *work)
 {
        struct igb_adapter *igb =
                container_of(work, struct igb_adapter, ptp_overflow_work.work);
-       struct timespec ts;
+       struct timespec64 ts;
 
-       igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
+       igb->ptp_caps.gettime64(&igb->ptp_caps, &ts);
 
-       pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+       pr_debug("igb overflow check at %lld.%09lu\n",
+                (long long) ts.tv_sec, ts.tv_nsec);
 
        schedule_delayed_work(&igb->ptp_overflow_work,
                              IGB_SYSTIM_OVERFLOW_PERIOD);
@@ -989,8 +988,8 @@ void igb_ptp_init(struct igb_adapter *adapter)
                adapter->ptp_caps.pps = 0;
                adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
                adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
-               adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
-               adapter->ptp_caps.settime = igb_ptp_settime_82576;
+               adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+               adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
                adapter->ptp_caps.enable = igb_ptp_feature_enable;
                adapter->cc.read = igb_ptp_read_82576;
                adapter->cc.mask = CYCLECOUNTER_MASK(64);
@@ -1009,8 +1008,8 @@ void igb_ptp_init(struct igb_adapter *adapter)
                adapter->ptp_caps.pps = 0;
                adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
                adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
-               adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
-               adapter->ptp_caps.settime = igb_ptp_settime_82576;
+               adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+               adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
                adapter->ptp_caps.enable = igb_ptp_feature_enable;
                adapter->cc.read = igb_ptp_read_82580;
                adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
@@ -1038,8 +1037,8 @@ void igb_ptp_init(struct igb_adapter *adapter)
                adapter->ptp_caps.pin_config = adapter->sdp_config;
                adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
                adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
-               adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
-               adapter->ptp_caps.settime = igb_ptp_settime_i210;
+               adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210;
+               adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
                adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
                adapter->ptp_caps.verify = igb_ptp_verify_pin;
                /* Enable the timer functions by clearing bit 31. */
@@ -1057,7 +1056,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
 
        /* Initialize the clock and overflow work for devices that need it. */
        if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
-               struct timespec ts = ktime_to_timespec(ktime_get_real());
+               struct timespec64 ts = ktime_to_timespec64(ktime_get_real());
 
                igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
        } else {
@@ -1171,7 +1170,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
 
        /* Re-initialize the timer. */
        if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
-               struct timespec ts = ktime_to_timespec(ktime_get_real());
+               struct timespec64 ts = ktime_to_timespec64(ktime_get_real());
 
                igb_ptp_write_i210(adapter, &ts);
        } else {
index d9fa999b16856e41f79a750c92d94223dfaa6d98..ae3f28332fa0151581488f2b38b618cf89d3c456 100644 (file)
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 #define _E1000_DEFINES_H_
 
 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define REQ_TX_DESCRIPTOR_MULTIPLE  8
-#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+#define REQ_TX_DESCRIPTOR_MULTIPLE     8
+#define REQ_RX_DESCRIPTOR_MULTIPLE     8
 
 /* IVAR valid bit */
-#define E1000_IVAR_VALID        0x80
+#define E1000_IVAR_VALID       0x80
 
 /* Receive Descriptor bit definitions */
-#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
-#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
-#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
-#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
-#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
-#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
-#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
-#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
-#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
-
-#define E1000_RXDEXT_STATERR_LB    0x00040000
-#define E1000_RXDEXT_STATERR_CE    0x01000000
-#define E1000_RXDEXT_STATERR_SE    0x02000000
-#define E1000_RXDEXT_STATERR_SEQ   0x04000000
-#define E1000_RXDEXT_STATERR_CXE   0x10000000
-#define E1000_RXDEXT_STATERR_TCPE  0x20000000
-#define E1000_RXDEXT_STATERR_IPE   0x40000000
-#define E1000_RXDEXT_STATERR_RXE   0x80000000
-
+#define E1000_RXD_STAT_DD      0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP     0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM    0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP      0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS   0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS   0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS    0x40    /* IP xsum calculated */
+#define E1000_RXD_ERR_SE       0x02    /* Symbol Error */
+#define E1000_RXD_SPC_VLAN_MASK        0x0FFF  /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_LB        0x00040000
+#define E1000_RXDEXT_STATERR_CE        0x01000000
+#define E1000_RXDEXT_STATERR_SE        0x02000000
+#define E1000_RXDEXT_STATERR_SEQ       0x04000000
+#define E1000_RXDEXT_STATERR_CXE       0x10000000
+#define E1000_RXDEXT_STATERR_TCPE      0x20000000
+#define E1000_RXDEXT_STATERR_IPE       0x40000000
+#define E1000_RXDEXT_STATERR_RXE       0x80000000
 
 /* Same mask, but for extended and packet split descriptors */
 #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
-    E1000_RXDEXT_STATERR_CE  |            \
-    E1000_RXDEXT_STATERR_SE  |            \
-    E1000_RXDEXT_STATERR_SEQ |            \
-    E1000_RXDEXT_STATERR_CXE |            \
-    E1000_RXDEXT_STATERR_RXE)
+       E1000_RXDEXT_STATERR_CE  | \
+       E1000_RXDEXT_STATERR_SE  | \
+       E1000_RXDEXT_STATERR_SEQ | \
+       E1000_RXDEXT_STATERR_CXE | \
+       E1000_RXDEXT_STATERR_RXE)
 
 /* Device Control */
-#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RST         0x04000000  /* Global reset */
 
 /* Device Status */
-#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
-#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
-#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
-#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
-#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
-#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
-
-#define SPEED_10    10
-#define SPEED_100   100
-#define SPEED_1000  1000
-#define HALF_DUPLEX 1
-#define FULL_DUPLEX 2
+#define E1000_STATUS_FD                0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU                0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_TXOFF     0x00000010      /* transmission paused */
+#define E1000_STATUS_SPEED_10  0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000        0x00000080      /* Speed 1000Mb/s */
+
+#define SPEED_10       10
+#define SPEED_100      100
+#define SPEED_1000     1000
+#define HALF_DUPLEX    1
+#define FULL_DUPLEX    2
 
 /* Transmit Descriptor bit definitions */
-#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
-#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_POPTS_IXSM   0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM   0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_DEXT     0x20000000 /* Desc extension (0 = legacy) */
+#define E1000_TXD_STAT_DD      0x00000001 /* Desc Done */
 
-#define MAX_JUMBO_FRAME_SIZE    0x3F00
+#define MAX_JUMBO_FRAME_SIZE   0x3F00
 
 /* 802.1q VLAN Packet Size */
-#define VLAN_TAG_SIZE              4    /* 802.3ac tag (not DMA'd) */
+#define VLAN_TAG_SIZE          4    /* 802.3ac tag (not DMA'd) */
 
 /* Error Codes */
-#define E1000_SUCCESS      0
-#define E1000_ERR_CONFIG   3
-#define E1000_ERR_MAC_INIT 5
-#define E1000_ERR_MBX      15
+#define E1000_SUCCESS          0
+#define E1000_ERR_CONFIG       3
+#define E1000_ERR_MAC_INIT     5
+#define E1000_ERR_MBX          15
 
 /* SRRCTL bit definitions */
-#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
-#define E1000_SRRCTL_BSIZEHDRSIZE_MASK                  0x00000F00
-#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
-#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
-#define E1000_SRRCTL_DESCTYPE_MASK                      0x0E000000
-#define E1000_SRRCTL_DROP_EN                            0x80000000
+#define E1000_SRRCTL_BSIZEPKT_SHIFT            10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK         0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                2  /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF       0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DESCTYPE_MASK             0x0E000000
+#define E1000_SRRCTL_DROP_EN                   0x80000000
 
-#define E1000_SRRCTL_BSIZEPKT_MASK      0x0000007F
-#define E1000_SRRCTL_BSIZEHDR_MASK      0x00003F00
+#define E1000_SRRCTL_BSIZEPKT_MASK     0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK     0x00003F00
 
 /* Additional Descriptor Control definitions */
-#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
-#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
+#define E1000_TXDCTL_QUEUE_ENABLE      0x02000000 /* Enable specific Tx Que */
+#define E1000_RXDCTL_QUEUE_ENABLE      0x02000000 /* Enable specific Rx Que */
 
 /* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN   (1 << 11) /* Tx Desc writeback RO bit */
 
-#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+#define E1000_VF_INIT_TIMEOUT  200 /* Number of retries to clear RSTI */
 
 #endif /* _E1000_DEFINES_H_ */
index 2178f87e9f610f3a95222275978d2b68a9d7832d..c6996feb1cb4e2559b2b3a260d95a6b0f980b449 100644 (file)
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -36,7 +35,6 @@
 #include "igbvf.h"
 #include <linux/if_vlan.h>
 
-
 struct igbvf_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
@@ -74,7 +72,7 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
 #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
 
 static int igbvf_get_settings(struct net_device *netdev,
-                              struct ethtool_cmd *ecmd)
+                             struct ethtool_cmd *ecmd)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -111,18 +109,18 @@ static int igbvf_get_settings(struct net_device *netdev,
 }
 
 static int igbvf_set_settings(struct net_device *netdev,
-                              struct ethtool_cmd *ecmd)
+                             struct ethtool_cmd *ecmd)
 {
        return -EOPNOTSUPP;
 }
 
 static void igbvf_get_pauseparam(struct net_device *netdev,
-                                 struct ethtool_pauseparam *pause)
+                                struct ethtool_pauseparam *pause)
 {
 }
 
 static int igbvf_set_pauseparam(struct net_device *netdev,
-                                struct ethtool_pauseparam *pause)
+                               struct ethtool_pauseparam *pause)
 {
        return -EOPNOTSUPP;
 }
@@ -130,12 +128,14 @@ static int igbvf_set_pauseparam(struct net_device *netdev,
 static u32 igbvf_get_msglevel(struct net_device *netdev)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
+
        return adapter->msg_enable;
 }
 
 static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
+
        adapter->msg_enable = data;
 }
 
@@ -146,7 +146,7 @@ static int igbvf_get_regs_len(struct net_device *netdev)
 }
 
 static void igbvf_get_regs(struct net_device *netdev,
-                           struct ethtool_regs *regs, void *p)
+                          struct ethtool_regs *regs, void *p)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -175,19 +175,19 @@ static int igbvf_get_eeprom_len(struct net_device *netdev)
 }
 
 static int igbvf_get_eeprom(struct net_device *netdev,
-                            struct ethtool_eeprom *eeprom, u8 *bytes)
+                           struct ethtool_eeprom *eeprom, u8 *bytes)
 {
        return -EOPNOTSUPP;
 }
 
 static int igbvf_set_eeprom(struct net_device *netdev,
-                            struct ethtool_eeprom *eeprom, u8 *bytes)
+                           struct ethtool_eeprom *eeprom, u8 *bytes)
 {
        return -EOPNOTSUPP;
 }
 
 static void igbvf_get_drvinfo(struct net_device *netdev,
-                              struct ethtool_drvinfo *drvinfo)
+                             struct ethtool_drvinfo *drvinfo)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
 
@@ -201,7 +201,7 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
 }
 
 static void igbvf_get_ringparam(struct net_device *netdev,
-                                struct ethtool_ringparam *ring)
+                               struct ethtool_ringparam *ring)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -214,7 +214,7 @@ static void igbvf_get_ringparam(struct net_device *netdev,
 }
 
 static int igbvf_set_ringparam(struct net_device *netdev,
-                               struct ethtool_ringparam *ring)
+                              struct ethtool_ringparam *ring)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct igbvf_ring *temp_ring;
@@ -224,12 +224,12 @@ static int igbvf_set_ringparam(struct net_device *netdev,
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
 
-       new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD);
-       new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD);
+       new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD);
+       new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD);
        new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
-       new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD);
-       new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD);
+       new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD);
+       new_tx_count = min_t(u32, new_tx_count, IGBVF_MAX_TXD);
        new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
        if ((new_tx_count == adapter->tx_ring->count) &&
@@ -239,7 +239,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
        }
 
        while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        if (!netif_running(adapter->netdev)) {
                adapter->tx_ring->count = new_tx_count;
@@ -255,10 +255,9 @@ static int igbvf_set_ringparam(struct net_device *netdev,
 
        igbvf_down(adapter);
 
-       /*
-        * We can't just free everything and then setup again,
+       /* We can't just free everything and then setup again,
         * because the ISRs in MSI-X mode get passed pointers
-        * to the tx and rx ring structs.
+        * to the Tx and Rx ring structs.
         */
        if (new_tx_count != adapter->tx_ring->count) {
                memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
@@ -283,7 +282,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
 
                igbvf_free_rx_resources(adapter->rx_ring);
 
-               memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
+               memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring));
        }
 err_setup:
        igbvf_up(adapter);
@@ -307,14 +306,13 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
 }
 
 static void igbvf_diag_test(struct net_device *netdev,
-                            struct ethtool_test *eth_test, u64 *data)
+                           struct ethtool_test *eth_test, u64 *data)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
 
        set_bit(__IGBVF_TESTING, &adapter->state);
 
-       /*
-        * Link test performed before hardware reset so autoneg doesn't
+       /* Link test performed before hardware reset so autoneg doesn't
         * interfere with test result
         */
        if (igbvf_link_test(adapter, &data[0]))
@@ -325,20 +323,20 @@ static void igbvf_diag_test(struct net_device *netdev,
 }
 
 static void igbvf_get_wol(struct net_device *netdev,
-                          struct ethtool_wolinfo *wol)
+                         struct ethtool_wolinfo *wol)
 {
        wol->supported = 0;
        wol->wolopts = 0;
 }
 
 static int igbvf_set_wol(struct net_device *netdev,
-                         struct ethtool_wolinfo *wol)
+                        struct ethtool_wolinfo *wol)
 {
        return -EOPNOTSUPP;
 }
 
 static int igbvf_get_coalesce(struct net_device *netdev,
-                              struct ethtool_coalesce *ec)
+                             struct ethtool_coalesce *ec)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
 
@@ -351,13 +349,13 @@ static int igbvf_get_coalesce(struct net_device *netdev,
 }
 
 static int igbvf_set_coalesce(struct net_device *netdev,
-                              struct ethtool_coalesce *ec)
+                             struct ethtool_coalesce *ec)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
        if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
-            (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+           (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
                adapter->current_itr = ec->rx_coalesce_usecs << 2;
                adapter->requested_itr = 1000000000 /
                                        (adapter->current_itr * 256);
@@ -366,8 +364,7 @@ static int igbvf_set_coalesce(struct net_device *netdev,
                adapter->current_itr = IGBVF_START_ITR;
                adapter->requested_itr = ec->rx_coalesce_usecs;
        } else if (ec->rx_coalesce_usecs == 0) {
-               /*
-                * The user's desire is to turn off interrupt throttling
+               /* The user's desire is to turn off interrupt throttling
                 * altogether, but due to HW limitations, we can't do that.
                 * Instead we set a very small value in EITR, which would
                 * allow ~967k interrupts per second, but allow the adapter's
@@ -376,8 +373,9 @@ static int igbvf_set_coalesce(struct net_device *netdev,
                adapter->current_itr = 4;
                adapter->requested_itr = 1000000000 /
                                        (adapter->current_itr * 256);
-       } else
+       } else {
                return -EINVAL;
+       }
 
        writel(adapter->current_itr,
               hw->hw_addr + adapter->rx_ring->itr_register);
@@ -388,15 +386,15 @@ static int igbvf_set_coalesce(struct net_device *netdev,
 static int igbvf_nway_reset(struct net_device *netdev)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
+
        if (netif_running(netdev))
                igbvf_reinit_locked(adapter);
        return 0;
 }
 
-
 static void igbvf_get_ethtool_stats(struct net_device *netdev,
-                                    struct ethtool_stats *stats,
-                                    u64 *data)
+                                   struct ethtool_stats *stats,
+                                   u64 *data)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        int i;
@@ -404,19 +402,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev,
        igbvf_update_stats(adapter);
        for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
                char *p = (char *)adapter +
-                         igbvf_gstrings_stats[i].stat_offset;
+                         igbvf_gstrings_stats[i].stat_offset;
                char *b = (char *)adapter +
-                         igbvf_gstrings_stats[i].base_stat_offset;
+                         igbvf_gstrings_stats[i].base_stat_offset;
                data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
-                           sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
-                           (*(u32 *)p - *(u32 *)b));
+                           sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
+                           (*(u32 *)p - *(u32 *)b));
        }
-
 }
 
 static int igbvf_get_sset_count(struct net_device *dev, int stringset)
 {
-       switch(stringset) {
+       switch (stringset) {
        case ETH_SS_TEST:
                return IGBVF_TEST_LEN;
        case ETH_SS_STATS:
@@ -427,7 +424,7 @@ static int igbvf_get_sset_count(struct net_device *dev, int stringset)
 }
 
 static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
-                              u8 *data)
+                             u8 *data)
 {
        u8 *p = data;
        int i;
index 7d6a25c8f889efd0eb69ef749dd0bb13f18302ad..f166baab8d7e59e7e8260076eeaecf21735f7514 100644 (file)
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -43,10 +42,10 @@ struct igbvf_info;
 struct igbvf_adapter;
 
 /* Interrupt defines */
-#define IGBVF_START_ITR                    488 /* ~8000 ints/sec */
-#define IGBVF_4K_ITR                       980
-#define IGBVF_20K_ITR                      196
-#define IGBVF_70K_ITR                       56
+#define IGBVF_START_ITR                488 /* ~8000 ints/sec */
+#define IGBVF_4K_ITR           980
+#define IGBVF_20K_ITR          196
+#define IGBVF_70K_ITR          56
 
 enum latency_range {
        lowest_latency = 0,
@@ -55,56 +54,55 @@ enum latency_range {
        latency_invalid = 255
 };
 
-
 /* Interrupt modes, as used by the IntMode parameter */
-#define IGBVF_INT_MODE_LEGACY           0
-#define IGBVF_INT_MODE_MSI              1
-#define IGBVF_INT_MODE_MSIX             2
+#define IGBVF_INT_MODE_LEGACY  0
+#define IGBVF_INT_MODE_MSI     1
+#define IGBVF_INT_MODE_MSIX    2
 
 /* Tx/Rx descriptor defines */
-#define IGBVF_DEFAULT_TXD               256
-#define IGBVF_MAX_TXD                   4096
-#define IGBVF_MIN_TXD                   80
+#define IGBVF_DEFAULT_TXD      256
+#define IGBVF_MAX_TXD          4096
+#define IGBVF_MIN_TXD          80
 
-#define IGBVF_DEFAULT_RXD               256
-#define IGBVF_MAX_RXD                   4096
-#define IGBVF_MIN_RXD                   80
+#define IGBVF_DEFAULT_RXD      256
+#define IGBVF_MAX_RXD          4096
+#define IGBVF_MIN_RXD          80
 
-#define IGBVF_MIN_ITR_USECS             10 /* 100000 irq/sec */
-#define IGBVF_MAX_ITR_USECS             10000 /* 100    irq/sec */
+#define IGBVF_MIN_ITR_USECS    10 /* 100000 irq/sec */
+#define IGBVF_MAX_ITR_USECS    10000 /* 100    irq/sec */
 
 /* RX descriptor control thresholds.
  * PTHRESH - MAC will consider prefetch if it has fewer than this number of
- *           descriptors available in its onboard memory.
- *           Setting this to 0 disables RX descriptor prefetch.
+ *        descriptors available in its onboard memory.
+ *        Setting this to 0 disables RX descriptor prefetch.
  * HTHRESH - MAC will only prefetch if there are at least this many descriptors
- *           available in host memory.
- *           If PTHRESH is 0, this should also be 0.
+ *        available in host memory.
+ *        If PTHRESH is 0, this should also be 0.
  * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
- *           descriptors until either it has this many to write back, or the
- *           ITR timer expires.
+ *        descriptors until either it has this many to write back, or the
+ *        ITR timer expires.
  */
-#define IGBVF_RX_PTHRESH                16
-#define IGBVF_RX_HTHRESH                8
-#define IGBVF_RX_WTHRESH                1
+#define IGBVF_RX_PTHRESH       16
+#define IGBVF_RX_HTHRESH       8
+#define IGBVF_RX_WTHRESH       1
 
 /* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE      1522
+#define MAXIMUM_ETHERNET_VLAN_SIZE     1522
 
-#define IGBVF_FC_PAUSE_TIME             0x0680 /* 858 usec */
+#define IGBVF_FC_PAUSE_TIME    0x0680 /* 858 usec */
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGBVF_TX_QUEUE_WAKE             32
+#define IGBVF_TX_QUEUE_WAKE    32
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define IGBVF_RX_BUFFER_WRITE           16 /* Must be power of 2 */
+#define IGBVF_RX_BUFFER_WRITE  16 /* Must be power of 2 */
 
-#define AUTO_ALL_MODES                  0
-#define IGBVF_EEPROM_APME               0x0400
+#define AUTO_ALL_MODES         0
+#define IGBVF_EEPROM_APME      0x0400
 
-#define IGBVF_MNG_VLAN_NONE             (-1)
+#define IGBVF_MNG_VLAN_NONE    (-1)
 
 /* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS                 (MAX_PS_BUFFERS - 1)
+#define PS_PAGE_BUFFERS                (MAX_PS_BUFFERS - 1)
 
 enum igbvf_boards {
        board_vf,
@@ -116,8 +114,7 @@ struct igbvf_queue_stats {
        u64 bytes;
 };
 
-/*
- * wrappers around a pointer to a socket buffer,
+/* wrappers around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer
  */
 struct igbvf_buffer {
@@ -148,10 +145,10 @@ union igbvf_desc {
 
 struct igbvf_ring {
        struct igbvf_adapter *adapter;  /* backlink */
-       union igbvf_desc *desc;         /* pointer to ring memory  */
-       dma_addr_t dma;                 /* phys address of ring    */
-       unsigned int size;              /* length of ring in bytes */
-       unsigned int count;             /* number of desc. in ring */
+       union igbvf_desc *desc; /* pointer to ring memory  */
+       dma_addr_t dma;         /* phys address of ring    */
+       unsigned int size;      /* length of ring in bytes */
+       unsigned int count;     /* number of desc. in ring */
 
        u16 next_to_use;
        u16 next_to_clean;
@@ -202,9 +199,7 @@ struct igbvf_adapter {
        u32 requested_itr; /* ints/sec or adaptive */
        u32 current_itr; /* Actual ITR register value, not ints/sec */
 
-       /*
-        * Tx
-        */
+       /* Tx */
        struct igbvf_ring *tx_ring /* One per active queue */
        ____cacheline_aligned_in_smp;
 
@@ -226,9 +221,7 @@ struct igbvf_adapter {
        u32 tx_fifo_size;
        u32 tx_dma_failed;
 
-       /*
-        * Rx
-        */
+       /* Rx */
        struct igbvf_ring *rx_ring;
 
        u32 rx_int_delay;
@@ -249,7 +242,7 @@ struct igbvf_adapter {
        struct net_device *netdev;
        struct pci_dev *pdev;
        struct net_device_stats net_stats;
-       spinlock_t stats_lock;      /* prevent concurrent stats updates */
+       spinlock_t stats_lock; /* prevent concurrent stats updates */
 
        /* structs defined in e1000_hw.h */
        struct e1000_hw hw;
@@ -286,16 +279,16 @@ struct igbvf_adapter {
 };
 
 struct igbvf_info {
-       enum e1000_mac_type     mac;
-       unsigned int            flags;
-       u32                     pba;
-       void                    (*init_ops)(struct e1000_hw *);
-       s32                     (*get_variants)(struct igbvf_adapter *);
+       enum e1000_mac_type     mac;
+       unsigned int            flags;
+       u32                     pba;
+       void                    (*init_ops)(struct e1000_hw *);
+       s32                     (*get_variants)(struct igbvf_adapter *);
 };
 
 /* hardware capability, feature, and workaround flags */
-#define IGBVF_FLAG_RX_CSUM_DISABLED             (1 << 0)
-#define IGBVF_FLAG_RX_LB_VLAN_BSWAP            (1 << 1)
+#define IGBVF_FLAG_RX_CSUM_DISABLED    (1 << 0)
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP    (1 << 1)
 #define IGBVF_RX_DESC_ADV(R, i)     \
        (&((((R).desc))[i].rx_desc))
 #define IGBVF_TX_DESC_ADV(R, i)     \
index b4b65bc9fc5dd0852969eb1942d347bc2994cef4..7b6cb4c3764caff0432a543080d9d05d766bf475 100644 (file)
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -54,10 +53,10 @@ out:
 }
 
 /**
- *  e1000_poll_for_ack - Wait for message acknowledgement
+ *  e1000_poll_for_ack - Wait for message acknowledgment
  *  @hw: pointer to the HW structure
  *
- *  returns SUCCESS if it successfully received a message acknowledgement
+ *  returns SUCCESS if it successfully received a message acknowledgment
  **/
 static s32 e1000_poll_for_ack(struct e1000_hw *hw)
 {
@@ -218,7 +217,7 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
        s32 ret_val = -E1000_ERR_MBX;
 
        if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
-                                        E1000_V2PMAILBOX_RSTI))) {
+                                        E1000_V2PMAILBOX_RSTI))) {
                ret_val = E1000_SUCCESS;
                hw->mbx.stats.rsts++;
        }
@@ -239,7 +238,7 @@ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
        /* Take ownership of the buffer */
        ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
 
-       /* reserve mailbox for vf use */
+       /* reserve mailbox for VF use */
        if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
                ret_val = E1000_SUCCESS;
 
@@ -283,7 +282,7 @@ out_no_write:
 }
 
 /**
- *  e1000_read_mbx_vf - Reads a message from the inbox intended for vf
+ *  e1000_read_mbx_vf - Reads a message from the inbox intended for VF
  *  @hw: pointer to the HW structure
  *  @msg: The message buffer
  *  @size: Length of buffer
@@ -315,17 +314,18 @@ out_no_read:
 }
 
 /**
- *  e1000_init_mbx_params_vf - set initial values for vf mailbox
+ *  e1000_init_mbx_params_vf - set initial values for VF mailbox
  *  @hw: pointer to the HW structure
  *
- *  Initializes the hw->mbx struct to correct values for vf mailbox
+ *  Initializes the hw->mbx struct to correct values for VF mailbox
  */
 s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
 {
        struct e1000_mbx_info *mbx = &hw->mbx;
 
        /* start mailbox as timed out and let the reset_hw call set the timeout
-        * value to being communications */
+        * value to being communications
+        */
        mbx->timeout = 0;
        mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
 
@@ -347,4 +347,3 @@ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
 
        return E1000_SUCCESS;
 }
-
index 24370bcb0e22dd3d1b386d2404903840e08c37f3..f800bf8eedaedbbb3e0836828f2f0fe5e6edee2e 100644 (file)
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
 #include "vf.h"
 
-#define E1000_V2PMAILBOX_REQ   0x00000001 /* Request for PF Ready bit */
-#define E1000_V2PMAILBOX_ACK   0x00000002 /* Ack PF message received */
-#define E1000_V2PMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
-#define E1000_V2PMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
-#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
-#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
-#define E1000_V2PMAILBOX_RSTI  0x00000040 /* PF has reset indication */
-#define E1000_V2PMAILBOX_RSTD  0x00000080 /* PF has indicated reset done */
+#define E1000_V2PMAILBOX_REQ   0x00000001 /* Request for PF Ready bit */
+#define E1000_V2PMAILBOX_ACK   0x00000002 /* Ack PF message received */
+#define E1000_V2PMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define E1000_V2PMAILBOX_RSTI  0x00000040 /* PF has reset indication */
+#define E1000_V2PMAILBOX_RSTD  0x00000080 /* PF has indicated reset done */
 #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
 
-#define E1000_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
+#define E1000_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
 
 /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
  * PF.  The reverse is true if it is E1000_PF_*.
  * Message ACK's are the value or'd with 0xF0000000
  */
-#define E1000_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
-                                               * this are the ACK */
-#define E1000_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
-                                               * this are the NACK */
-#define E1000_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
-                                                 clear to send requests */
+/* Messages below or'd with this are the ACK */
+#define E1000_VT_MSGTYPE_ACK   0x80000000
+/* Messages below or'd with this are the NACK */
+#define E1000_VT_MSGTYPE_NACK  0x40000000
+/* Indicates that VF is still clear to send requests */
+#define E1000_VT_MSGTYPE_CTS   0x20000000
 
 /* We have a total wait time of 1s for vf mailbox posted messages */
-#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */
-#define E1000_VF_MBX_INIT_DELAY   500  /* usec delay between retries */
+#define E1000_VF_MBX_INIT_TIMEOUT      2000 /* retry count for mbx timeout */
+#define E1000_VF_MBX_INIT_DELAY                500  /* usec delay between retries */
 
-#define E1000_VT_MSGINFO_SHIFT    16
+#define E1000_VT_MSGINFO_SHIFT 16
 /* bits 23:16 are used for exra info for certain messages */
-#define E1000_VT_MSGINFO_MASK     (0xFF << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VT_MSGINFO_MASK  (0xFF << E1000_VT_MSGINFO_SHIFT)
 
-#define E1000_VF_RESET            0x01 /* VF requests reset */
-#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
-#define E1000_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
-#define E1000_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
-#define E1000_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
+#define E1000_VF_RESET         0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR  0x02 /* VF requests PF to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define E1000_VF_SET_VLAN      0x04 /* VF requests PF to set VLAN */
+#define E1000_VF_SET_LPE       0x05 /* VF requests PF to set VMOLR.LPE */
 
-#define E1000_PF_CONTROL_MSG      0x0100 /* PF control message */
+#define E1000_PF_CONTROL_MSG   0x0100 /* PF control message */
 
 void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
 s32 e1000_init_mbx_params_vf(struct e1000_hw *);
index ebf9d4a42fdde33a2cd977fd6892f96ffec9b51b..95af14e139d769254e8b3e20982b37a5888ebbba 100644 (file)
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -66,26 +65,27 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
 
 static struct igbvf_info igbvf_vf_info = {
-       .mac                    = e1000_vfadapt,
-       .flags                  = 0,
-       .pba                    = 10,
-       .init_ops               = e1000_init_function_pointers_vf,
+       .mac            = e1000_vfadapt,
+       .flags          = 0,
+       .pba            = 10,
+       .init_ops       = e1000_init_function_pointers_vf,
 };
 
 static struct igbvf_info igbvf_i350_vf_info = {
-       .mac                    = e1000_vfadapt_i350,
-       .flags                  = 0,
-       .pba                    = 10,
-       .init_ops               = e1000_init_function_pointers_vf,
+       .mac            = e1000_vfadapt_i350,
+       .flags          = 0,
+       .pba            = 10,
+       .init_ops       = e1000_init_function_pointers_vf,
 };
 
 static const struct igbvf_info *igbvf_info_tbl[] = {
-       [board_vf]              = &igbvf_vf_info,
-       [board_i350_vf]         = &igbvf_i350_vf_info,
+       [board_vf]      = &igbvf_vf_info,
+       [board_i350_vf] = &igbvf_i350_vf_info,
 };
 
 /**
  * igbvf_desc_unused - calculate if we have unused descriptors
+ * @rx_ring: address of receive ring structure
  **/
 static int igbvf_desc_unused(struct igbvf_ring *ring)
 {
@@ -103,9 +103,9 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
  * @skb: pointer to sk_buff to be indicated to stack
  **/
 static void igbvf_receive_skb(struct igbvf_adapter *adapter,
-                              struct net_device *netdev,
-                              struct sk_buff *skb,
-                              u32 status, u16 vlan)
+                             struct net_device *netdev,
+                             struct sk_buff *skb,
+                             u32 status, u16 vlan)
 {
        u16 vid;
 
@@ -123,7 +123,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
 }
 
 static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
-                                         u32 status_err, struct sk_buff *skb)
+                                        u32 status_err, struct sk_buff *skb)
 {
        skb_checksum_none_assert(skb);
 
@@ -153,7 +153,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
  * @cleaned_count: number of buffers to repopulate
  **/
 static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
-                                   int cleaned_count)
+                                  int cleaned_count)
 {
        struct igbvf_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
@@ -188,8 +188,8 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
                        }
                        buffer_info->page_dma =
                                dma_map_page(&pdev->dev, buffer_info->page,
-                                            buffer_info->page_offset,
-                                            PAGE_SIZE / 2,
+                                            buffer_info->page_offset,
+                                            PAGE_SIZE / 2,
                                             DMA_FROM_DEVICE);
                        if (dma_mapping_error(&pdev->dev,
                                              buffer_info->page_dma)) {
@@ -209,7 +209,7 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
 
                        buffer_info->skb = skb;
                        buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
-                                                         bufsz,
+                                                         bufsz,
                                                          DMA_FROM_DEVICE);
                        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                                dev_kfree_skb(buffer_info->skb);
@@ -219,14 +219,14 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
                        }
                }
                /* Refresh the desc even if buffer_addrs didn't change because
-                * each write-back erases this info. */
+                * each write-back erases this info.
+                */
                if (adapter->rx_ps_hdr_size) {
                        rx_desc->read.pkt_addr =
                             cpu_to_le64(buffer_info->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
                } else {
-                       rx_desc->read.pkt_addr =
-                            cpu_to_le64(buffer_info->dma);
+                       rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
                        rx_desc->read.hdr_addr = 0;
                }
 
@@ -247,7 +247,8 @@ no_buffers:
                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
-                * such as IA-64). */
+                * such as IA-64).
+               */
                wmb();
                writel(i, adapter->hw.hw_addr + rx_ring->tail);
        }
@@ -261,7 +262,7 @@ no_buffers:
  * is no guarantee that everything was cleaned
  **/
 static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
-                               int *work_done, int work_to_do)
+                              int *work_done, int work_to_do)
 {
        struct igbvf_ring *rx_ring = adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
@@ -292,8 +293,9 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                 * that case, it fills the header buffer and spills the rest
                 * into the page.
                 */
-               hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
-                 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
+               hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
+                      & E1000_RXDADV_HDRBUFLEN_MASK) >>
+                      E1000_RXDADV_HDRBUFLEN_SHIFT;
                if (hlen > adapter->rx_ps_hdr_size)
                        hlen = adapter->rx_ps_hdr_size;
 
@@ -306,7 +308,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                buffer_info->skb = NULL;
                if (!adapter->rx_ps_hdr_size) {
                        dma_unmap_single(&pdev->dev, buffer_info->dma,
-                                        adapter->rx_buffer_len,
+                                        adapter->rx_buffer_len,
                                         DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                        skb_put(skb, length);
@@ -315,21 +317,21 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
 
                if (!skb_shinfo(skb)->nr_frags) {
                        dma_unmap_single(&pdev->dev, buffer_info->dma,
-                                        adapter->rx_ps_hdr_size,
+                                        adapter->rx_ps_hdr_size,
                                         DMA_FROM_DEVICE);
                        skb_put(skb, hlen);
                }
 
                if (length) {
                        dma_unmap_page(&pdev->dev, buffer_info->page_dma,
-                                      PAGE_SIZE / 2,
+                                      PAGE_SIZE / 2,
                                       DMA_FROM_DEVICE);
                        buffer_info->page_dma = 0;
 
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                                          buffer_info->page,
-                                          buffer_info->page_offset,
-                                          length);
+                                          buffer_info->page,
+                                          buffer_info->page_offset,
+                                          length);
 
                        if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
                            (page_count(buffer_info->page) != 1))
@@ -370,7 +372,7 @@ send_up:
                skb->protocol = eth_type_trans(skb, netdev);
 
                igbvf_receive_skb(adapter, netdev, skb, staterr,
-                                 rx_desc->wb.upper.vlan);
+                                 rx_desc->wb.upper.vlan);
 
 next_desc:
                rx_desc->wb.upper.status_error = 0;
@@ -402,7 +404,7 @@ next_desc:
 }
 
 static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
-                            struct igbvf_buffer *buffer_info)
+                           struct igbvf_buffer *buffer_info)
 {
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
@@ -431,7 +433,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
  * Return 0 on success, negative on failure
  **/
 int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
-                             struct igbvf_ring *tx_ring)
+                            struct igbvf_ring *tx_ring)
 {
        struct pci_dev *pdev = adapter->pdev;
        int size;
@@ -458,7 +460,7 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
 err:
        vfree(tx_ring->buffer_info);
        dev_err(&adapter->pdev->dev,
-               "Unable to allocate memory for the transmit descriptor ring\n");
+               "Unable to allocate memory for the transmit descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -501,7 +503,7 @@ err:
        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;
        dev_err(&adapter->pdev->dev,
-               "Unable to allocate memory for the receive descriptor ring\n");
+               "Unable to allocate memory for the receive descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -578,13 +580,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
-                       if (adapter->rx_ps_hdr_size){
+                       if (adapter->rx_ps_hdr_size) {
                                dma_unmap_single(&pdev->dev, buffer_info->dma,
-                                                adapter->rx_ps_hdr_size,
+                                                adapter->rx_ps_hdr_size,
                                                 DMA_FROM_DEVICE);
                        } else {
                                dma_unmap_single(&pdev->dev, buffer_info->dma,
-                                                adapter->rx_buffer_len,
+                                                adapter->rx_buffer_len,
                                                 DMA_FROM_DEVICE);
                        }
                        buffer_info->dma = 0;
@@ -599,7 +601,7 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
                        if (buffer_info->page_dma)
                                dma_unmap_page(&pdev->dev,
                                               buffer_info->page_dma,
-                                              PAGE_SIZE / 2,
+                                              PAGE_SIZE / 2,
                                               DMA_FROM_DEVICE);
                        put_page(buffer_info->page);
                        buffer_info->page = NULL;
@@ -638,7 +640,7 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
        rx_ring->buffer_info = NULL;
 
        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
-                         rx_ring->dma);
+                         rx_ring->dma);
        rx_ring->desc = NULL;
 }
 
@@ -649,13 +651,12 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
  * @packets: the number of packets during this measurement interval
  * @bytes: the number of bytes during this measurement interval
  *
- *      Stores a new ITR value based on packets and byte
- *      counts during the last interrupt.  The advantage of per interrupt
- *      computation is faster updates and more accurate ITR for the current
- *      traffic pattern.  Constants in this function were computed
- *      based on theoretical maximum wire speed and thresholds were set based
- *      on testing data as well as attempting to minimize response time
- *      while increasing bulk throughput.
+ * Stores a new ITR value based on packets and byte counts during the last
+ * interrupt.  The advantage of per interrupt computation is faster updates
+ * and more accurate ITR for the current traffic pattern.  Constants in this
+ * function were computed based on theoretical maximum wire speed and thresholds
+ * were set based on testing data as well as attempting to minimize response
+ * time while increasing bulk throughput.
  **/
 static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
                                           enum latency_range itr_setting,
@@ -744,17 +745,15 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
 
        new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);
 
-
        if (new_itr != adapter->tx_ring->itr_val) {
                u32 current_itr = adapter->tx_ring->itr_val;
-               /*
-                * this attempts to bias the interrupt rate towards Bulk
+               /* this attempts to bias the interrupt rate towards Bulk
                 * by adding intermediate steps when interrupt rate is
                 * increasing
                 */
                new_itr = new_itr > current_itr ?
-                            min(current_itr + (new_itr >> 2), new_itr) :
-                            new_itr;
+                         min(current_itr + (new_itr >> 2), new_itr) :
+                         new_itr;
                adapter->tx_ring->itr_val = new_itr;
 
                adapter->tx_ring->set_itr = 1;
@@ -772,9 +771,10 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
 
        if (new_itr != adapter->rx_ring->itr_val) {
                u32 current_itr = adapter->rx_ring->itr_val;
+
                new_itr = new_itr > current_itr ?
-                            min(current_itr + (new_itr >> 2), new_itr) :
-                            new_itr;
+                         min(current_itr + (new_itr >> 2), new_itr) :
+                         new_itr;
                adapter->rx_ring->itr_val = new_itr;
 
                adapter->rx_ring->set_itr = 1;
@@ -829,7 +829,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
-                                           skb->len;
+                                           skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }
@@ -849,9 +849,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
 
        tx_ring->next_to_clean = i;
 
-       if (unlikely(count &&
-                    netif_carrier_ok(netdev) &&
-                    igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
+       if (unlikely(count && netif_carrier_ok(netdev) &&
+           igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
@@ -902,8 +901,9 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
        adapter->total_tx_bytes = 0;
        adapter->total_tx_packets = 0;
 
-       /* auto mask will automatically reenable the interrupt when we write
-        * EICS */
+       /* auto mask will automatically re-enable the interrupt when we write
+        * EICS
+        */
        if (!igbvf_clean_tx_irq(tx_ring))
                /* Ring was not completely cleaned, so fire another interrupt */
                ew32(EICS, tx_ring->eims_value);
@@ -941,15 +941,16 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
 #define IGBVF_NO_QUEUE -1
 
 static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
-                                int tx_queue, int msix_vector)
+                               int tx_queue, int msix_vector)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;
 
        /* 82576 uses a table-based method for assigning vectors.
-          Each queue has a single entry in the table to which we write
-          a vector number along with a "valid" bit.  Sadly, the layout
-          of the table is somewhat counterintuitive. */
+        * Each queue has a single entry in the table to which we write
+        * a vector number along with a "valid" bit.  Sadly, the layout
+        * of the table is somewhat counterintuitive.
+        */
        if (rx_queue > IGBVF_NO_QUEUE) {
                index = (rx_queue >> 1);
                ivar = array_er32(IVAR0, index);
@@ -984,6 +985,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
 
 /**
  * igbvf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
  *
  * igbvf_configure_msix sets up the hardware to properly
  * generate MSI-X interrupts.
@@ -1027,6 +1029,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
 
 /**
  * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure
  *
  * Attempt to configure interrupts using the best available
  * capabilities of the hardware and kernel.
@@ -1036,27 +1039,28 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
        int err = -ENOMEM;
        int i;
 
-       /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
+       /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
        adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
-                                       GFP_KERNEL);
+                                       GFP_KERNEL);
        if (adapter->msix_entries) {
                for (i = 0; i < 3; i++)
                        adapter->msix_entries[i].entry = i;
 
                err = pci_enable_msix_range(adapter->pdev,
-                                           adapter->msix_entries, 3, 3);
+                                           adapter->msix_entries, 3, 3);
        }
 
        if (err < 0) {
                /* MSI-X failed */
                dev_err(&adapter->pdev->dev,
-                       "Failed to initialize MSI-X interrupts.\n");
+                       "Failed to initialize MSI-X interrupts.\n");
                igbvf_reset_interrupt_capability(adapter);
        }
 }
 
 /**
  * igbvf_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
  *
  * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
  * kernel.
@@ -1075,8 +1079,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
        }
 
        err = request_irq(adapter->msix_entries[vector].vector,
-                         igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
-                         netdev);
+                         igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
+                         netdev);
        if (err)
                goto out;
 
@@ -1085,8 +1089,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
        vector++;
 
        err = request_irq(adapter->msix_entries[vector].vector,
-                         igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
-                         netdev);
+                         igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
+                         netdev);
        if (err)
                goto out;
 
@@ -1095,7 +1099,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
        vector++;
 
        err = request_irq(adapter->msix_entries[vector].vector,
-                         igbvf_msix_other, 0, netdev->name, netdev);
+                         igbvf_msix_other, 0, netdev->name, netdev);
        if (err)
                goto out;
 
@@ -1130,6 +1134,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
 
 /**
  * igbvf_request_irq - initialize interrupts
+ * @adapter: board private structure
  *
  * Attempts to configure interrupts using the best available
  * capabilities of the hardware and kernel.
@@ -1146,7 +1151,7 @@ static int igbvf_request_irq(struct igbvf_adapter *adapter)
                return err;
 
        dev_err(&adapter->pdev->dev,
-               "Unable to allocate interrupt, Error: %d\n", err);
+               "Unable to allocate interrupt, Error: %d\n", err);
 
        return err;
 }
@@ -1164,6 +1169,7 @@ static void igbvf_free_irq(struct igbvf_adapter *adapter)
 
 /**
  * igbvf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
  **/
 static void igbvf_irq_disable(struct igbvf_adapter *adapter)
 {
@@ -1177,6 +1183,7 @@ static void igbvf_irq_disable(struct igbvf_adapter *adapter)
 
 /**
  * igbvf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
  **/
 static void igbvf_irq_enable(struct igbvf_adapter *adapter)
 {
@@ -1252,7 +1259,7 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
 
        if (hw->mac.ops.set_vfta(hw, vid, false)) {
                dev_err(&adapter->pdev->dev,
-                       "Failed to remove vlan id %d\n", vid);
+                       "Failed to remove vlan id %d\n", vid);
                return -EINVAL;
        }
        clear_bit(vid, adapter->active_vlans);
@@ -1298,7 +1305,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
 
        /* Turn off Relaxed Ordering on head write-backs.  The writebacks
         * MUST be delivered in order or it will completely screw up
-        * our bookeeping.
+        * our bookkeeping.
         */
        dca_txctrl = er32(DCA_TXCTRL(0));
        dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
@@ -1325,15 +1332,15 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
        u32 srrctl = 0;
 
        srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
-                   E1000_SRRCTL_BSIZEHDR_MASK |
-                   E1000_SRRCTL_BSIZEPKT_MASK);
+                   E1000_SRRCTL_BSIZEHDR_MASK |
+                   E1000_SRRCTL_BSIZEPKT_MASK);
 
        /* Enable queue drop to avoid head of line blocking */
        srrctl |= E1000_SRRCTL_DROP_EN;
 
        /* Setup buffer sizes */
        srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
-                 E1000_SRRCTL_BSIZEPKT_SHIFT;
+                 E1000_SRRCTL_BSIZEPKT_SHIFT;
 
        if (adapter->rx_buffer_len < 2048) {
                adapter->rx_ps_hdr_size = 0;
@@ -1341,7 +1348,7 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
        } else {
                adapter->rx_ps_hdr_size = 128;
                srrctl |= adapter->rx_ps_hdr_size <<
-                         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+                         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
                srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
        }
 
@@ -1369,8 +1376,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
 
        rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
 
-       /*
-        * Setup the HW Rx Head and Tail Descriptor Pointers and
+       /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring
         */
        rdba = rx_ring->dma;
@@ -1441,10 +1447,11 @@ static void igbvf_configure(struct igbvf_adapter *adapter)
        igbvf_setup_srrctl(adapter);
        igbvf_configure_rx(adapter);
        igbvf_alloc_rx_buffers(adapter->rx_ring,
-                              igbvf_desc_unused(adapter->rx_ring));
+                              igbvf_desc_unused(adapter->rx_ring));
 }
 
 /* igbvf_reset - bring the hardware into a known good state
+ * @adapter: private board structure
  *
  * This function boots the hardware and enables some settings that
  * require a configuration cycle of the hardware - those cannot be
@@ -1494,7 +1501,6 @@ int igbvf_up(struct igbvf_adapter *adapter)
        hw->mac.get_link_status = 1;
        mod_timer(&adapter->watchdog_timer, jiffies + 1);
 
-
        return 0;
 }
 
@@ -1504,8 +1510,7 @@ void igbvf_down(struct igbvf_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
        u32 rxdctl, txdctl;
 
-       /*
-        * signal that we're down so the interrupt handler does not
+       /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer
         */
        set_bit(__IGBVF_DOWN, &adapter->state);
@@ -1514,6 +1519,7 @@ void igbvf_down(struct igbvf_adapter *adapter)
        rxdctl = er32(RXDCTL(0));
        ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
 
+       netif_carrier_off(netdev);
        netif_stop_queue(netdev);
 
        /* disable transmits in the hardware */
@@ -1530,8 +1536,6 @@ void igbvf_down(struct igbvf_adapter *adapter)
 
        del_timer_sync(&adapter->watchdog_timer);
 
-       netif_carrier_off(netdev);
-
        /* record the stats before reset*/
        igbvf_update_stats(adapter);
 
@@ -1547,7 +1551,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter)
 {
        might_sleep();
        while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
        igbvf_down(adapter);
        igbvf_up(adapter);
        clear_bit(__IGBVF_RESETTING, &adapter->state);
@@ -1662,8 +1666,7 @@ static int igbvf_open(struct net_device *netdev)
        if (err)
                goto err_setup_rx;
 
-       /*
-        * before we allocate an interrupt, we must be ready to handle it.
+       /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call pci_request_irq, so we have to setup our
         * clean_rx handler before we do so.
@@ -1725,6 +1728,7 @@ static int igbvf_close(struct net_device *netdev)
 
        return 0;
 }
+
 /**
  * igbvf_set_mac - Change the Ethernet Address of the NIC
  * @netdev: network interface device structure
@@ -1753,15 +1757,15 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
        return 0;
 }
 
-#define UPDATE_VF_COUNTER(reg, name)                                    \
-       {                                                               \
-               u32 current_counter = er32(reg);                        \
-               if (current_counter < adapter->stats.last_##name)       \
-                       adapter->stats.name += 0x100000000LL;           \
-               adapter->stats.last_##name = current_counter;           \
-               adapter->stats.name &= 0xFFFFFFFF00000000LL;            \
-               adapter->stats.name |= current_counter;                 \
-       }
+#define UPDATE_VF_COUNTER(reg, name) \
+{ \
+       u32 current_counter = er32(reg); \
+       if (current_counter < adapter->stats.last_##name) \
+               adapter->stats.name += 0x100000000LL; \
+       adapter->stats.last_##name = current_counter; \
+       adapter->stats.name &= 0xFFFFFFFF00000000LL; \
+       adapter->stats.name |= current_counter; \
+}
 
 /**
  * igbvf_update_stats - Update the board statistics counters
@@ -1772,8 +1776,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
 
-       /*
-        * Prevent stats update while adapter is being reset, link is down
+       /* Prevent stats update while adapter is being reset, link is down
         * or if the pci connection is down.
         */
        if (adapter->link_speed == 0)
@@ -1832,7 +1835,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
  **/
 static void igbvf_watchdog(unsigned long data)
 {
-       struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
+       struct igbvf_adapter *adapter = (struct igbvf_adapter *)data;
 
        /* Do the rest outside of interrupt context */
        schedule_work(&adapter->watchdog_task);
@@ -1841,8 +1844,8 @@ static void igbvf_watchdog(unsigned long data)
 static void igbvf_watchdog_task(struct work_struct *work)
 {
        struct igbvf_adapter *adapter = container_of(work,
-                                                    struct igbvf_adapter,
-                                                    watchdog_task);
+                                                    struct igbvf_adapter,
+                                                    watchdog_task);
        struct net_device *netdev = adapter->netdev;
        struct e1000_mac_info *mac = &adapter->hw.mac;
        struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -1855,8 +1858,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
        if (link) {
                if (!netif_carrier_ok(netdev)) {
                        mac->ops.get_link_up_info(&adapter->hw,
-                                                 &adapter->link_speed,
-                                                 &adapter->link_duplex);
+                                                 &adapter->link_speed,
+                                                 &adapter->link_duplex);
                        igbvf_print_link_info(adapter);
 
                        netif_carrier_on(netdev);
@@ -1876,10 +1879,9 @@ static void igbvf_watchdog_task(struct work_struct *work)
                igbvf_update_stats(adapter);
        } else {
                tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
-                             tx_ring->count);
+                             tx_ring->count);
                if (tx_pending) {
-                       /*
-                        * We've lost link, so the controller stops DMA,
+                       /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context).
@@ -1898,15 +1900,15 @@ static void igbvf_watchdog_task(struct work_struct *work)
                          round_jiffies(jiffies + (2 * HZ)));
 }
 
-#define IGBVF_TX_FLAGS_CSUM             0x00000001
-#define IGBVF_TX_FLAGS_VLAN             0x00000002
-#define IGBVF_TX_FLAGS_TSO              0x00000004
-#define IGBVF_TX_FLAGS_IPV4             0x00000008
-#define IGBVF_TX_FLAGS_VLAN_MASK        0xffff0000
-#define IGBVF_TX_FLAGS_VLAN_SHIFT       16
+#define IGBVF_TX_FLAGS_CSUM            0x00000001
+#define IGBVF_TX_FLAGS_VLAN            0x00000002
+#define IGBVF_TX_FLAGS_TSO             0x00000004
+#define IGBVF_TX_FLAGS_IPV4            0x00000008
+#define IGBVF_TX_FLAGS_VLAN_MASK       0xffff0000
+#define IGBVF_TX_FLAGS_VLAN_SHIFT      16
 
 static int igbvf_tso(struct igbvf_adapter *adapter,
-                     struct igbvf_ring *tx_ring,
+                    struct igbvf_ring *tx_ring,
                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
                     __be16 protocol)
 {
@@ -1930,17 +1932,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
 
        if (protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
+
                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                        iph->daddr, 0,
-                                                        IPPROTO_TCP,
-                                                        0);
+                                                        iph->daddr, 0,
+                                                        IPPROTO_TCP,
+                                                        0);
        } else if (skb_is_gso_v6(skb)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                      &ipv6_hdr(skb)->daddr,
-                                                      0, IPPROTO_TCP, 0);
+                                                      &ipv6_hdr(skb)->daddr,
+                                                      0, IPPROTO_TCP, 0);
        }
 
        i = tx_ring->next_to_use;
@@ -1984,7 +1987,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
 }
 
 static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
-                                 struct igbvf_ring *tx_ring,
+                                struct igbvf_ring *tx_ring,
                                 struct sk_buff *skb, u32 tx_flags,
                                 __be16 protocol)
 {
@@ -2005,8 +2008,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        info |= (skb_transport_header(skb) -
-                                skb_network_header(skb));
-
+                                skb_network_header(skb));
 
                context_desc->vlan_macip_lens = cpu_to_le32(info);
 
@@ -2055,6 +2057,10 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
 
        netif_stop_queue(netdev);
 
+       /* Herbert's original patch had:
+        *  smp_mb__after_netif_stop_queue();
+        * but since that doesn't exist yet, just open code it.
+        */
        smp_mb();
 
        /* We need to check again just in case room has been made available */
@@ -2067,11 +2073,11 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
        return 0;
 }
 
-#define IGBVF_MAX_TXD_PWR       16
-#define IGBVF_MAX_DATA_PER_TXD  (1 << IGBVF_MAX_TXD_PWR)
+#define IGBVF_MAX_TXD_PWR      16
+#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
 
 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
-                                   struct igbvf_ring *tx_ring,
+                                  struct igbvf_ring *tx_ring,
                                   struct sk_buff *skb)
 {
        struct igbvf_buffer *buffer_info;
@@ -2093,7 +2099,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                goto dma_error;
 
-
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                const struct skb_frag_struct *frag;
 
@@ -2111,7 +2116,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                buffer_info->time_stamp = jiffies;
                buffer_info->mapped_as_page = true;
                buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
-                                               DMA_TO_DEVICE);
+                                                   DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                        goto dma_error;
        }
@@ -2133,7 +2138,7 @@ dma_error:
 
        /* clear timestamp and dma mappings for remaining portion of packet */
        while (count--) {
-               if (i==0)
+               if (i == 0)
                        i += tx_ring->count;
                i--;
                buffer_info = &tx_ring->buffer_info[i];
@@ -2144,10 +2149,10 @@ dma_error:
 }
 
 static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
-                                      struct igbvf_ring *tx_ring,
+                                     struct igbvf_ring *tx_ring,
                                      int tx_flags, int count,
                                      unsigned int first, u32 paylen,
-                                      u8 hdr_len)
+                                     u8 hdr_len)
 {
        union e1000_adv_tx_desc *tx_desc = NULL;
        struct igbvf_buffer *buffer_info;
@@ -2155,7 +2160,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
        unsigned int i;
 
        cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
-                       E1000_ADVTXD_DCMD_DEXT);
+                       E1000_ADVTXD_DCMD_DEXT);
 
        if (tx_flags & IGBVF_TX_FLAGS_VLAN)
                cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
@@ -2182,7 +2187,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
                tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
                tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->read.cmd_type_len =
-                        cpu_to_le32(cmd_type_len | buffer_info->length);
+                        cpu_to_le32(cmd_type_len | buffer_info->length);
                tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
                i++;
                if (i == tx_ring->count)
@@ -2193,14 +2198,16 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
-        * such as IA-64). */
+        * such as IA-64).
+        */
        wmb();
 
        tx_ring->buffer_info[first].next_to_watch = tx_desc;
        tx_ring->next_to_use = i;
        writel(i, adapter->hw.hw_addr + tx_ring->tail);
        /* we need this if more than one processor can write to our tail
-        * at a time, it syncronizes IO on IA64/Altix systems */
+        * at a time, it synchronizes IO on IA64/Altix systems
+        */
        mmiowb();
 }
 
@@ -2225,11 +2232,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
-       /*
-        * need: count + 4 desc gap to keep tail from touching
-         *       + 2 desc gap to keep tail from touching head,
-         *       + 1 desc for skb->data,
-         *       + 1 desc for context descriptor,
+       /* need: count + 4 desc gap to keep tail from touching
+        *       + 2 desc gap to keep tail from touching head,
+        *       + 1 desc for skb->data,
+        *       + 1 desc for context descriptor,
         * head, otherwise try next time
         */
        if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
@@ -2258,11 +2264,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
        if (tso)
                tx_flags |= IGBVF_TX_FLAGS_TSO;
        else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
-                (skb->ip_summed == CHECKSUM_PARTIAL))
+                (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IGBVF_TX_FLAGS_CSUM;
 
-       /*
-        * count reflects descriptors mapped, if 0 then mapping error
+       /* count reflects descriptors mapped, if 0 then mapping error
         * has occurred and we need to rewind the descriptor queue
         */
        count = igbvf_tx_map_adv(adapter, tx_ring, skb);
@@ -2313,6 +2318,7 @@ static void igbvf_tx_timeout(struct net_device *netdev)
 static void igbvf_reset_task(struct work_struct *work)
 {
        struct igbvf_adapter *adapter;
+
        adapter = container_of(work, struct igbvf_adapter, reset_task);
 
        igbvf_reinit_locked(adapter);
@@ -2356,14 +2362,13 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
        }
 
        while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
        /* igbvf_down has a dependency on max_frame_size */
        adapter->max_frame_size = max_frame;
        if (netif_running(netdev))
                igbvf_down(adapter);
 
-       /*
-        * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+       /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more, this pushes us to allocate from the next
         * larger slab size.
         * i.e. RXBUFFER_2048 --> size-4096 slab
@@ -2382,15 +2387,14 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
                adapter->rx_buffer_len = PAGE_SIZE / 2;
 #endif
 
-
        /* adjust allocation if LPE protects us, and we aren't using SBP */
        if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-            (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+           (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
                adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
-                                        ETH_FCS_LEN;
+                                        ETH_FCS_LEN;
 
        dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
-                netdev->mtu, new_mtu);
+                netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
 
        if (netif_running(netdev))
@@ -2477,8 +2481,7 @@ static void igbvf_shutdown(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
  * without having to re-enable interrupts. It's not called while
  * the interrupt routine is executing.
  */
@@ -2503,7 +2506,7 @@ static void igbvf_netpoll(struct net_device *netdev)
  * this device has been detected.
  */
 static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
-                                                pci_channel_state_t state)
+                                               pci_channel_state_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2583,7 +2586,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
 }
 
 static int igbvf_set_features(struct net_device *netdev,
-       netdev_features_t features)
+                             netdev_features_t features)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
 
@@ -2596,21 +2599,21 @@ static int igbvf_set_features(struct net_device *netdev,
 }
 
 static const struct net_device_ops igbvf_netdev_ops = {
-       .ndo_open                       = igbvf_open,
-       .ndo_stop                       = igbvf_close,
-       .ndo_start_xmit                 = igbvf_xmit_frame,
-       .ndo_get_stats                  = igbvf_get_stats,
-       .ndo_set_rx_mode                = igbvf_set_multi,
-       .ndo_set_mac_address            = igbvf_set_mac,
-       .ndo_change_mtu                 = igbvf_change_mtu,
-       .ndo_do_ioctl                   = igbvf_ioctl,
-       .ndo_tx_timeout                 = igbvf_tx_timeout,
-       .ndo_vlan_rx_add_vid            = igbvf_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid           = igbvf_vlan_rx_kill_vid,
+       .ndo_open               = igbvf_open,
+       .ndo_stop               = igbvf_close,
+       .ndo_start_xmit         = igbvf_xmit_frame,
+       .ndo_get_stats          = igbvf_get_stats,
+       .ndo_set_rx_mode        = igbvf_set_multi,
+       .ndo_set_mac_address    = igbvf_set_mac,
+       .ndo_change_mtu         = igbvf_change_mtu,
+       .ndo_do_ioctl           = igbvf_ioctl,
+       .ndo_tx_timeout         = igbvf_tx_timeout,
+       .ndo_vlan_rx_add_vid    = igbvf_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = igbvf_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller            = igbvf_netpoll,
+       .ndo_poll_controller    = igbvf_netpoll,
 #endif
-       .ndo_set_features               = igbvf_set_features,
+       .ndo_set_features       = igbvf_set_features,
 };
 
 /**
@@ -2645,8 +2648,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        } else {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       dev_err(&pdev->dev, "No usable DMA "
-                               "configuration, aborting\n");
+                       dev_err(&pdev->dev,
+                               "No usable DMA configuration, aborting\n");
                        goto err_dma;
                }
        }
@@ -2686,7 +2689,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = -EIO;
        adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
-                                     pci_resource_len(pdev, 0));
+                                     pci_resource_len(pdev, 0));
 
        if (!adapter->hw.hw_addr)
                goto err_ioremap;
@@ -2712,16 +2715,16 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->bd_number = cards_found++;
 
        netdev->hw_features = NETIF_F_SG |
-                          NETIF_F_IP_CSUM |
+                          NETIF_F_IP_CSUM |
                           NETIF_F_IPV6_CSUM |
                           NETIF_F_TSO |
                           NETIF_F_TSO6 |
                           NETIF_F_RXCSUM;
 
        netdev->features = netdev->hw_features |
-                          NETIF_F_HW_VLAN_CTAG_TX |
-                          NETIF_F_HW_VLAN_CTAG_RX |
-                          NETIF_F_HW_VLAN_CTAG_FILTER;
+                          NETIF_F_HW_VLAN_CTAG_TX |
+                          NETIF_F_HW_VLAN_CTAG_RX |
+                          NETIF_F_HW_VLAN_CTAG_FILTER;
 
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
@@ -2742,7 +2745,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                if (err)
                        dev_info(&pdev->dev, "Error reading MAC address.\n");
                else if (is_zero_ether_addr(adapter->hw.mac.addr))
-                       dev_info(&pdev->dev, "MAC address not assigned by administrator.\n");
+                       dev_info(&pdev->dev,
+                                "MAC address not assigned by administrator.\n");
                memcpy(netdev->dev_addr, adapter->hw.mac.addr,
                       netdev->addr_len);
        }
@@ -2751,11 +2755,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_info(&pdev->dev, "Assigning random MAC address.\n");
                eth_hw_addr_random(netdev);
                memcpy(adapter->hw.mac.addr, netdev->dev_addr,
-                       netdev->addr_len);
+                      netdev->addr_len);
        }
 
        setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
-                   (unsigned long) adapter);
+                   (unsigned long)adapter);
 
        INIT_WORK(&adapter->reset_task, igbvf_reset_task);
        INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
@@ -2818,8 +2822,7 @@ static void igbvf_remove(struct pci_dev *pdev)
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       /*
-        * The watchdog timer may be rescheduled, so explicitly
+       /* The watchdog timer may be rescheduled, so explicitly
         * disable it from being rescheduled.
         */
        set_bit(__IGBVF_DOWN, &adapter->state);
@@ -2832,9 +2835,8 @@ static void igbvf_remove(struct pci_dev *pdev)
 
        igbvf_reset_interrupt_capability(adapter);
 
-       /*
-        * it is important to delete the napi struct prior to freeing the
-        * rx ring so that you do not end up with null pointer refs
+       /* it is important to delete the NAPI struct prior to freeing the
+        * Rx ring so that you do not end up with null pointer refs
         */
        netif_napi_del(&adapter->rx_ring->napi);
        kfree(adapter->tx_ring);
@@ -2866,17 +2868,17 @@ MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
 
 /* PCI Device API Driver */
 static struct pci_driver igbvf_driver = {
-       .name     = igbvf_driver_name,
-       .id_table = igbvf_pci_tbl,
-       .probe    = igbvf_probe,
-       .remove   = igbvf_remove,
+       .name           = igbvf_driver_name,
+       .id_table       = igbvf_pci_tbl,
+       .probe          = igbvf_probe,
+       .remove         = igbvf_remove,
 #ifdef CONFIG_PM
        /* Power Management Hooks */
-       .suspend  = igbvf_suspend,
-       .resume   = igbvf_resume,
+       .suspend        = igbvf_suspend,
+       .resume         = igbvf_resume,
 #endif
-       .shutdown = igbvf_shutdown,
-       .err_handler = &igbvf_err_handler
+       .shutdown       = igbvf_shutdown,
+       .err_handler    = &igbvf_err_handler
 };
 
 /**
@@ -2888,6 +2890,7 @@ static struct pci_driver igbvf_driver = {
 static int __init igbvf_init_module(void)
 {
        int ret;
+
        pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
        pr_info("%s\n", igbvf_copyright);
 
@@ -2909,7 +2912,6 @@ static void __exit igbvf_exit_module(void)
 }
 module_exit(igbvf_exit_module);
 
-
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
 MODULE_LICENSE("GPL");
index 7dc6341715dcd948d01adbd7fc032c5576be04ee..86a7c120b5740555d8e904e7678351fadc96c833 100644 (file)
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 #ifndef _E1000_REGS_H_
 #define _E1000_REGS_H_
 
-#define E1000_CTRL      0x00000 /* Device Control - RW */
-#define E1000_STATUS    0x00008 /* Device Status - RO */
-#define E1000_ITR       0x000C4 /* Interrupt Throttling Rate - RW */
-#define E1000_EICR      0x01580 /* Ext. Interrupt Cause Read - R/clr */
-#define E1000_EITR(_n)  (0x01680 + (0x4 * (_n)))
-#define E1000_EICS      0x01520 /* Ext. Interrupt Cause Set - W0 */
-#define E1000_EIMS      0x01524 /* Ext. Interrupt Mask Set/Read - RW */
-#define E1000_EIMC      0x01528 /* Ext. Interrupt Mask Clear - WO */
-#define E1000_EIAC      0x0152C /* Ext. Interrupt Auto Clear - RW */
-#define E1000_EIAM      0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
-#define E1000_IVAR0     0x01700 /* Interrupt Vector Allocation (array) - RW */
-#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
-/*
- * Convenience macros
+#define E1000_CTRL     0x00000 /* Device Control - RW */
+#define E1000_STATUS   0x00008 /* Device Status - RO */
+#define E1000_ITR      0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_EICR     0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS     0x01520 /* Ext. Interrupt Cause Set - W0 */
+#define E1000_EIMS     0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC     0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC     0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM     0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_IVAR0    0x01700 /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC        0x01740 /* IVAR for "other" causes - RW */
+
+/* Convenience macros
  *
  * Note: "_n" is the queue number of the register to be written to.
  *
  * Example usage:
  * E1000_RDBAL_REG(current_rx_queue)
  */
-#define E1000_RDBAL(_n)      ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
-                                         (0x0C000 + ((_n) * 0x40)))
-#define E1000_RDBAH(_n)      ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
-                                         (0x0C004 + ((_n) * 0x40)))
-#define E1000_RDLEN(_n)      ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
-                                         (0x0C008 + ((_n) * 0x40)))
-#define E1000_SRRCTL(_n)     ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
-                                         (0x0C00C + ((_n) * 0x40)))
-#define E1000_RDH(_n)        ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
-                                         (0x0C010 + ((_n) * 0x40)))
-#define E1000_RDT(_n)        ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
-                                         (0x0C018 + ((_n) * 0x40)))
-#define E1000_RXDCTL(_n)     ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
-                                         (0x0C028 + ((_n) * 0x40)))
-#define E1000_TDBAL(_n)      ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
-                                         (0x0E000 + ((_n) * 0x40)))
-#define E1000_TDBAH(_n)      ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
-                                         (0x0E004 + ((_n) * 0x40)))
-#define E1000_TDLEN(_n)      ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
-                                         (0x0E008 + ((_n) * 0x40)))
-#define E1000_TDH(_n)        ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
-                                         (0x0E010 + ((_n) * 0x40)))
-#define E1000_TDT(_n)        ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
-                                         (0x0E018 + ((_n) * 0x40)))
-#define E1000_TXDCTL(_n)     ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
-                                         (0x0E028 + ((_n) * 0x40)))
-#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
-#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
-#define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
-                                       (0x054E0 + ((_i - 16) * 8)))
-#define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
-                                       (0x054E4 + ((_i - 16) * 8)))
+#define E1000_RDBAL(_n)        ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+                        (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n)        ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+                        (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n)        ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+                        (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n)       ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+                                (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n)  ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+                        (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n)  ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+                        (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n)       ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+                                (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n)        ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+                        (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n)        ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+                        (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n)        ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+                        (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n)  ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+                        (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n)  ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+                        (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n)       ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+                                (0x0E028 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n)   (0x03814 + (_n << 8))
+#define E1000_DCA_RXCTRL(_n)   (0x02814 + (_n << 8))
+#define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+                        (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+                        (0x054E4 + ((_i - 16) * 8)))
 
 /* Statistics registers */
-#define E1000_VFGPRC    0x00F10
-#define E1000_VFGORC    0x00F18
-#define E1000_VFMPRC    0x00F3C
-#define E1000_VFGPTC    0x00F14
-#define E1000_VFGOTC    0x00F34
-#define E1000_VFGOTLBC  0x00F50
-#define E1000_VFGPTLBC  0x00F44
-#define E1000_VFGORLBC  0x00F48
-#define E1000_VFGPRLBC  0x00F40
+#define E1000_VFGPRC   0x00F10
+#define E1000_VFGORC   0x00F18
+#define E1000_VFMPRC   0x00F3C
+#define E1000_VFGPTC   0x00F14
+#define E1000_VFGOTC   0x00F34
+#define E1000_VFGOTLBC 0x00F50
+#define E1000_VFGPTLBC 0x00F44
+#define E1000_VFGORLBC 0x00F48
+#define E1000_VFGPRLBC 0x00F40
 
 /* These act per VF so an array friendly macro is used */
-#define E1000_V2PMAILBOX(_n)   (0x00C40 + (4 * (_n)))
-#define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
+#define E1000_V2PMAILBOX(_n)   (0x00C40 + (4 * (_n)))
+#define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
 
 /* Define macros for handling registers */
-#define er32(reg) readl(hw->hw_addr + E1000_##reg)
-#define ew32(reg, val) writel((val), hw->hw_addr +  E1000_##reg)
+#define er32(reg)      readl(hw->hw_addr + E1000_##reg)
+#define ew32(reg, val) writel((val), hw->hw_addr +  E1000_##reg)
 #define array_er32(reg, offset) \
        readl(hw->hw_addr + E1000_##reg + (offset << 2))
 #define array_ew32(reg, offset, val) \
        writel((val), hw->hw_addr +  E1000_##reg + (offset << 2))
-#define e1e_flush() er32(STATUS)
+#define e1e_flush()    er32(STATUS)
 
 #endif
index 955ad8c2c53456a2bb9ce28515a00d52264ed3ee..a13baa90ae20298842e3aa6b5877be0f4ad7fe25 100644 (file)
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
 *******************************************************************************/
 
-
 #include "vf.h"
 
 static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
 static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
-                                     u16 *duplex);
+                                    u16 *duplex);
 static s32 e1000_init_hw_vf(struct e1000_hw *hw);
 static s32 e1000_reset_hw_vf(struct e1000_hw *hw);
 
 static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *,
-                                         u32, u32, u32);
+                                        u32, u32, u32);
 static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
 static s32 e1000_read_mac_addr_vf(struct e1000_hw *);
 static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
@@ -94,7 +92,7 @@ void e1000_init_function_pointers_vf(struct e1000_hw *hw)
  *  the status register's data which is often stale and inaccurate.
  **/
 static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
-                                     u16 *duplex)
+                                    u16 *duplex)
 {
        s32 status;
 
@@ -130,7 +128,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
        u8 *addr = (u8 *)(&msgbuf[1]);
        u32 ctrl;
 
-       /* assert vf queue/interrupt reset */
+       /* assert VF queue/interrupt reset */
        ctrl = er32(CTRL);
        ew32(CTRL, ctrl | E1000_CTRL_RST);
 
@@ -144,7 +142,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
                /* mailbox timeout can now become active */
                mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
 
-               /* notify pf of vf reset completion */
+               /* notify PF of VF reset completion */
                msgbuf[0] = E1000_VF_RESET;
                mbx->ops.write_posted(hw, msgbuf, 1);
 
@@ -153,7 +151,8 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
                /* set our "perm_addr" based on info provided by PF */
                ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
                if (!ret_val) {
-                       if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
+                       if (msgbuf[0] == (E1000_VF_RESET |
+                                         E1000_VT_MSGTYPE_ACK))
                                memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
                        else
                                ret_val = -E1000_ERR_MAC_INIT;
@@ -194,15 +193,14 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
        /* Register count multiplied by bits per register */
        hash_mask = (hw->mac.mta_reg_count * 32) - 1;
 
-       /*
-        * The bit_shift is the number of left-shifts
+       /* The bit_shift is the number of left-shifts
         * where 0xFF would still fall within the hash mask.
         */
        while (hash_mask >> bit_shift != 0xFF)
                bit_shift++;
 
        hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
-                                 (((u16) mc_addr[5]) << bit_shift)));
+                                 (((u16)mc_addr[5]) << bit_shift)));
 
        return hash_value;
 }
@@ -221,8 +219,8 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
  *  unless there are workarounds that change this.
  **/
 static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
-                                  u8 *mc_addr_list, u32 mc_addr_count,
-                                  u32 rar_used_count, u32 rar_count)
+                                        u8 *mc_addr_list, u32 mc_addr_count,
+                                        u32 rar_used_count, u32 rar_count)
 {
        struct e1000_mbx_info *mbx = &hw->mbx;
        u32 msgbuf[E1000_VFMAILBOX_SIZE];
@@ -305,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
  *  @addr: pointer to the receive address
  *  @index: receive address array register
  **/
-static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
+static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
 {
        struct e1000_mbx_info *mbx = &hw->mbx;
        u32 msgbuf[3];
@@ -354,8 +352,7 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
        s32 ret_val = E1000_SUCCESS;
        u32 in_msg = 0;
 
-       /*
-        * We only want to run this if there has been a rst asserted.
+       /* We only want to run this if there has been a rst asserted.
         * in this case that could mean a link change, device reset,
         * or a virtual function reset
         */
@@ -367,31 +364,33 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
        if (!mac->get_link_status)
                goto out;
 
-       /* if link status is down no point in checking to see if pf is up */
+       /* if link status is down no point in checking to see if PF is up */
        if (!(er32(STATUS) & E1000_STATUS_LU))
                goto out;
 
        /* if the read failed it could just be a mailbox collision, best wait
-        * until we are called again and don't report an error */
+        * until we are called again and don't report an error
+        */
        if (mbx->ops.read(hw, &in_msg, 1))
                goto out;
 
        /* if incoming message isn't clear to send we are waiting on response */
        if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
-               /* message is not CTS and is NACK we must have lost CTS status */
+               /* msg is not CTS and is NACK we must have lost CTS status */
                if (in_msg & E1000_VT_MSGTYPE_NACK)
                        ret_val = -E1000_ERR_MAC_INIT;
                goto out;
        }
 
-       /* the pf is talking, if we timed out in the past we reinit */
+       /* the PF is talking, if we timed out in the past we reinit */
        if (!mbx->timeout) {
                ret_val = -E1000_ERR_MAC_INIT;
                goto out;
        }
 
        /* if we passed all the tests above then the link is up and we no
-        * longer need to check for link */
+        * longer need to check for link
+        */
        mac->get_link_status = false;
 
 out:
index 57db3c68dfcd2f5a8fca5a06de4aa52aa901b1eb..0f1eca639f680caadff0a13b53622e6672fee20d 100644 (file)
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
 struct e1000_hw;
 
-#define E1000_DEV_ID_82576_VF                 0x10CA
-#define E1000_DEV_ID_I350_VF                  0x1520
-#define E1000_REVISION_0 0
-#define E1000_REVISION_1 1
-#define E1000_REVISION_2 2
-#define E1000_REVISION_3 3
-#define E1000_REVISION_4 4
+#define E1000_DEV_ID_82576_VF          0x10CA
+#define E1000_DEV_ID_I350_VF           0x1520
+#define E1000_REVISION_0       0
+#define E1000_REVISION_1       1
+#define E1000_REVISION_2       2
+#define E1000_REVISION_3       3
+#define E1000_REVISION_4       4
 
-#define E1000_FUNC_0     0
-#define E1000_FUNC_1     1
+#define E1000_FUNC_0   0
+#define E1000_FUNC_1   1
 
-/*
- * Receive Address Register Count
+/* Receive Address Register Count
  * Number of high/low register pairs in the RAR.  The RAR (Receive Address
  * Registers) holds the directed and multicast addresses that we monitor.
  * These entries are also used for MAC-based filtering.
  */
-#define E1000_RAR_ENTRIES_VF      1
+#define E1000_RAR_ENTRIES_VF   1
 
 /* Receive Descriptor - Advanced */
 union e1000_adv_rx_desc {
        struct {
-               u64 pkt_addr;             /* Packet buffer address */
-               u64 hdr_addr;             /* Header buffer address */
+               u64 pkt_addr; /* Packet buffer address */
+               u64 hdr_addr; /* Header buffer address */
        } read;
        struct {
                struct {
@@ -69,53 +67,53 @@ union e1000_adv_rx_desc {
                                u32 data;
                                struct {
                                        u16 pkt_info; /* RSS/Packet type */
-                                       u16 hdr_info; /* Split Header,
-                                                      * hdr buffer length */
+                                       /* Split Header, hdr buffer length */
+                                       u16 hdr_info;
                                } hs_rss;
                        } lo_dword;
                        union {
-                               u32 rss;          /* RSS Hash */
+                               u32 rss; /* RSS Hash */
                                struct {
-                                       u16 ip_id;    /* IP id */
-                                       u16 csum;     /* Packet Checksum */
+                                       u16 ip_id; /* IP id */
+                                       u16 csum;  /* Packet Checksum */
                                } csum_ip;
                        } hi_dword;
                } lower;
                struct {
-                       u32 status_error;     /* ext status/error */
-                       u16 length;           /* Packet length */
-                       u16 vlan;             /* VLAN tag */
+                       u32 status_error; /* ext status/error */
+                       u16 length; /* Packet length */
+                       u16 vlan;   /* VLAN tag */
                } upper;
        } wb;  /* writeback */
 };
 
-#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
-#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+#define E1000_RXDADV_HDRBUFLEN_MASK    0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT   5
 
 /* Transmit Descriptor - Advanced */
 union e1000_adv_tx_desc {
        struct {
-               u64 buffer_addr;    /* Address of descriptor's data buf */
+               u64 buffer_addr; /* Address of descriptor's data buf */
                u32 cmd_type_len;
                u32 olinfo_status;
        } read;
        struct {
-               u64 rsvd;       /* Reserved */
+               u64 rsvd; /* Reserved */
                u32 nxtseq_seed;
                u32 status;
        } wb;
 };
 
 /* Adv Transmit Descriptor Config Masks */
-#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
-#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
-#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
-#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
-#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
-#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
-#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
-#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP  0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS   0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE  0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE  0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_PAYLEN_SHIFT      14 /* Adv desc PAYLEN shift */
 
 /* Context descriptors */
 struct e1000_adv_tx_context_desc {
@@ -125,11 +123,11 @@ struct e1000_adv_tx_context_desc {
        u32 mss_l4len_idx;
 };
 
-#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
-#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
-#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
-#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
-#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
+#define E1000_ADVTXD_MACLEN_SHIFT      9  /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_IPV4                0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_L4T_TCP     0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_L4LEN_SHIFT       8  /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT         16 /* Adv ctxt MSS shift */
 
 enum e1000_mac_type {
        e1000_undefined = 0,
@@ -262,5 +260,4 @@ struct e1000_hw {
 void e1000_rlpml_set_vf(struct e1000_hw *, u16);
 void e1000_init_function_pointers_vf(struct e1000_hw *hw);
 
-
 #endif /* _E1000_VF_H_ */
index 11a1bdbe3fd9dcdd7e3da5094e34c83d1c151b56..31f91459312f1757fcd795eb3cd512cd12911b59 100644 (file)
@@ -285,6 +285,8 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
        /* prevent the interrupt handler from restarting watchdog */
        set_bit(__IXGB_DOWN, &adapter->flags);
 
+       netif_carrier_off(netdev);
+
        napi_disable(&adapter->napi);
        /* waiting for NAPI to complete can re-enable interrupts */
        ixgb_irq_disable(adapter);
@@ -298,7 +300,6 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
 
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
-       netif_carrier_off(netdev);
        netif_stop_queue(netdev);
 
        ixgb_reset(adapter);
index 7dcbbec09a705153d1d385ed789db21f65da840d..7068e9c3691dd84af5faf6712eafae23b81b901b 100644 (file)
@@ -613,7 +613,6 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 4)
 #define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 5)
 #define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 6)
-#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 7)
 #define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 8)
 #define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 9)
 #define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 10)
index c5c97b483d7ceb03e5fc96beec277845bed69ea9..824a7ab79ab6bd66580bcdefef23b168f3163e7a 100644 (file)
@@ -171,17 +171,21 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
  *  @hw: pointer to hardware structure
  *
  *  Starts the hardware using the generic start_hw function.
- *  Disables relaxed ordering Then set pcie completion timeout
+ *  Disables relaxed ordering for archs other than SPARC
+ *  Then set pcie completion timeout
  *
  **/
 static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 {
+#ifndef CONFIG_SPARC
        u32 regval;
        u32 i;
+#endif
        s32 ret_val;
 
        ret_val = ixgbe_start_hw_generic(hw);
 
+#ifndef CONFIG_SPARC
        /* Disable relaxed ordering */
        for (i = 0; ((i < hw->mac.max_tx_queues) &&
             (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
@@ -197,7 +201,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
                            IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
                IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
        }
-
+#endif
        if (ret_val)
                return ret_val;
 
@@ -1193,6 +1197,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
        .init_thermal_sensor_thresh = NULL,
        .prot_autoc_read        = &prot_autoc_read_generic,
        .prot_autoc_write       = &prot_autoc_write_generic,
+       .enable_rx              = &ixgbe_enable_rx_generic,
+       .disable_rx             = &ixgbe_disable_rx_generic,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
index cf55a0df877b594bf49de77b28a46c90b00023cb..e0c363948bf46696df96413304fea826f3e054ce 100644 (file)
@@ -1977,7 +1977,10 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
         */
        hw->mac.ops.disable_rx_buff(hw);
 
-       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+       if (regval & IXGBE_RXCTRL_RXEN)
+               hw->mac.ops.enable_rx(hw);
+       else
+               hw->mac.ops.disable_rx(hw);
 
        hw->mac.ops.enable_rx_buff(hw);
 
@@ -2336,6 +2339,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
        .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
        .prot_autoc_read        = &prot_autoc_read_82599,
        .prot_autoc_write       = &prot_autoc_write_82599,
+       .enable_rx              = &ixgbe_enable_rx_generic,
+       .disable_rx             = &ixgbe_disable_rx_generic,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
index 9c66babd4edd6116139575fe4fab3cf37d8470e9..06d8f3cfa099b74a83534a8fce953efd8d6ac066 100644 (file)
@@ -312,7 +312,6 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
 {
        u32 i;
-       u32 regval;
 
        /* Clear the rate limiters */
        for (i = 0; i < hw->mac.max_tx_queues; i++) {
@@ -321,20 +320,25 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
        }
        IXGBE_WRITE_FLUSH(hw);
 
+#ifndef CONFIG_SPARC
        /* Disable relaxed ordering */
        for (i = 0; i < hw->mac.max_tx_queues; i++) {
+               u32 regval;
+
                regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
                regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
        }
 
        for (i = 0; i < hw->mac.max_rx_queues; i++) {
+               u32 regval;
+
                regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
                regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
                            IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
                IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
        }
-
+#endif
        return 0;
 }
 
@@ -703,7 +707,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
        hw->adapter_stopped = true;
 
        /* Disable the receive unit */
-       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
+       hw->mac.ops.disable_rx(hw);
 
        /* Clear interrupt mask to stop interrupts from being generated */
        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
@@ -2639,7 +2643,10 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
  **/
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
 {
-       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+       if (regval & IXGBE_RXCTRL_RXEN)
+               hw->mac.ops.enable_rx(hw);
+       else
+               hw->mac.ops.disable_rx(hw);
 
        return 0;
 }
@@ -3850,3 +3857,44 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
        return 0;
 }
 
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
+{
+       u32 rxctrl;
+
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       if (rxctrl & IXGBE_RXCTRL_RXEN) {
+               if (hw->mac.type != ixgbe_mac_82598EB) {
+                       u32 pfdtxgswc;
+
+                       pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+                       if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+                               pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+                               IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+                               hw->mac.set_lben = true;
+                       } else {
+                               hw->mac.set_lben = false;
+                       }
+               }
+               rxctrl &= ~IXGBE_RXCTRL_RXEN;
+               IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+       }
+}
+
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
+{
+       u32 rxctrl;
+
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
+
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               if (hw->mac.set_lben) {
+                       u32 pfdtxgswc;
+
+                       pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+                       pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
+                       IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+                       hw->mac.set_lben = false;
+               }
+       }
+}
index 8cfadcb2676ed3a30386927b359e70408e692d9a..f21f8a165ec4ff4c104d5ac8aedb385c7f841901 100644 (file)
@@ -130,6 +130,8 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
 
 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
 
 #define IXGBE_FAILED_READ_REG 0xffffffffU
 #define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
index e5be0dd508deab592d5219d031ccaff8f2662e9b..ccaecb1b86194823ae7f0b62212295adc7ce7cdb 100644 (file)
@@ -1351,7 +1351,7 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
 
        if (ixgbe_removed(adapter->hw.hw_addr)) {
                *data = 1;
-               return 1;
+               return true;
        }
        for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
                before = ixgbe_read_reg(&adapter->hw, reg);
@@ -1376,7 +1376,7 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
 
        if (ixgbe_removed(adapter->hw.hw_addr)) {
                *data = 1;
-               return 1;
+               return true;
        }
        before = ixgbe_read_reg(&adapter->hw, reg);
        ixgbe_write_reg(&adapter->hw, reg, write & mask);
@@ -1637,9 +1637,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
        /* shut down the DMA engines now so they can be reinitialized later */
 
        /* first Rx */
-       reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-       reg_ctl &= ~IXGBE_RXCTRL_RXEN;
-       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
+       hw->mac.ops.disable_rx(hw);
        ixgbe_disable_rx_queue(adapter, rx_ring);
 
        /* now Tx */
@@ -1670,6 +1668,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
        struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+       struct ixgbe_hw *hw = &adapter->hw;
        u32 rctl, reg_data;
        int ret_val;
        int err;
@@ -1713,14 +1712,16 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
                goto err_nomem;
        }
 
-       rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
+       hw->mac.ops.disable_rx(hw);
 
        ixgbe_configure_rx_ring(adapter, rx_ring);
 
-       rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
+       rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
+       rctl |= IXGBE_RXCTRL_DMBYPS;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
 
+       hw->mac.ops.enable_rx(hw);
+
        return 0;
 
 err_nomem:
index 70cc4c5c0a0130e82b44bd2aa597aca401e670de..395dc6bb5d82139b6e4a502517f171256d06eac9 100644 (file)
@@ -1619,14 +1619,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
                         struct sk_buff *skb)
 {
-       struct ixgbe_adapter *adapter = q_vector->adapter;
-
        if (ixgbe_qv_busy_polling(q_vector))
                netif_receive_skb(skb);
-       else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
-               napi_gro_receive(&q_vector->napi, skb);
        else
-               netif_rx(skb);
+               napi_gro_receive(&q_vector->napi, skb);
 }
 
 /**
@@ -2609,7 +2605,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
        eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
 
        /* The lower 16bits of the EICR register are for the queue interrupts
-        * which should be masked here in order to not accidently clear them if
+        * which should be masked here in order to not accidentally clear them if
         * the bits are high when ixgbe_msix_other is called. There is a race
         * condition otherwise which results in possible performance loss
         * especially if the ixgbe_msix_other interrupt is triggering
@@ -3705,8 +3701,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        u32 rxctrl, rfctl;
 
        /* disable receives while setting up the descriptors */
-       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+       hw->mac.ops.disable_rx(hw);
 
        ixgbe_setup_psrtype(adapter);
        ixgbe_setup_rdrxctl(adapter);
@@ -3731,6 +3726,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
 
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        /* disable drop enable for 82598 parts */
        if (hw->mac.type == ixgbe_mac_82598EB)
                rxctrl |= IXGBE_RXCTRL_DMBYPS;
@@ -3924,7 +3920,7 @@ static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
        for (i = 0; i < hw->mac.num_rar_entries; i++) {
                adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
                adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
-               memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+               eth_zero_addr(adapter->mac_table[i].addr);
                adapter->mac_table[i].queue = 0;
        }
        ixgbe_sync_mac_table(adapter);
@@ -3992,7 +3988,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
                    adapter->mac_table[i].queue == queue) {
                        adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
                        adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
-                       memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+                       eth_zero_addr(adapter->mac_table[i].addr);
                        adapter->mac_table[i].queue = 0;
                        ixgbe_sync_mac_table(adapter);
                        return 0;
@@ -5014,7 +5010,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *upper;
        struct list_head *iter;
-       u32 rxctrl;
        int i;
 
        /* signal that we are down to the interrupt handler */
@@ -5022,8 +5017,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
                return; /* do nothing if already down */
 
        /* disable receives */
-       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+       hw->mac.ops.disable_rx(hw);
 
        /* disable all enabled rx queues */
        for (i = 0; i < adapter->num_rx_queues; i++)
@@ -6174,7 +6168,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
 
        /* Cause software interrupt to ensure rings are cleaned */
        ixgbe_irq_rearm_queues(adapter, eics);
-
 }
 
 /**
@@ -7507,14 +7500,9 @@ static void ixgbe_netpoll(struct net_device *netdev)
        if (test_bit(__IXGBE_DOWN, &adapter->state))
                return;
 
-       adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               for (i = 0; i < adapter->num_q_vectors; i++)
-                       ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
-       } else {
-               ixgbe_intr(adapter->pdev->irq, netdev);
-       }
-       adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
+       /* loop through and schedule all active queues */
+       for (i = 0; i < adapter->num_q_vectors; i++)
+               ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
 }
 
 #endif
index 79c00f57d3e7de72a1f33039702b6b188ab04f58..e5ba04025e2b9dc76d58090cd5a46a563fbf22e4 100644 (file)
@@ -279,20 +279,18 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  * read the timecounter and return the correct value on ns,
  * after converting it into a struct timespec.
  */
-static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        struct ixgbe_adapter *adapter =
                container_of(ptp, struct ixgbe_adapter, ptp_caps);
        u64 ns;
-       u32 remainder;
        unsigned long flags;
 
        spin_lock_irqsave(&adapter->tmreg_lock, flags);
        ns = timecounter_read(&adapter->tc);
        spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
 
        return 0;
 }
@@ -306,15 +304,14 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
  * wall timer value.
  */
 static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
-                            const struct timespec *ts)
+                            const struct timespec64 *ts)
 {
        struct ixgbe_adapter *adapter =
                container_of(ptp, struct ixgbe_adapter, ptp_caps);
        u64 ns;
        unsigned long flags;
 
-       ns = ts->tv_sec * 1000000000ULL;
-       ns += ts->tv_nsec;
+       ns = timespec64_to_ns(ts);
 
        /* reset the timecounter */
        spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -407,7 +404,7 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
 {
        bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
                                             IXGBE_OVERFLOW_PERIOD);
-       struct timespec ts;
+       struct timespec64 ts;
 
        if (timeout) {
                ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
@@ -488,7 +485,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
  * @work: pointer to the work struct
  *
  * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware
- * timestamp has been taken for the current skb. It is necesary, because the
+ * timestamp has been taken for the current skb. It is necessary, because the
  * descriptor's "done" bit does not correlate with the timestamp event.
  */
 static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
@@ -874,8 +871,8 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
                adapter->ptp_caps.pps = 1;
                adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
                adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
-               adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
-               adapter->ptp_caps.settime = ixgbe_ptp_settime;
+               adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+               adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
                adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
                break;
        case ixgbe_mac_82599EB:
@@ -890,8 +887,8 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
                adapter->ptp_caps.pps = 0;
                adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
                adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
-               adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
-               adapter->ptp_caps.settime = ixgbe_ptp_settime;
+               adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+               adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
                adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
                break;
        default:
index 7f37fe7269a7360c0edbccfab6f8e7e03ff5a3b0..09a291bb7c343c6b202e8bff6f7594329ec2a564 100644 (file)
@@ -141,7 +141,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
                 * The 82599 supports up to 64 VFs per physical function
                 * but this implementation limits allocation to 63 so that
                 * basic networking resources are still available to the
-                * physical function.  If the user requests greater thn
+                * physical function.  If the user requests greater than
                 * 63 VFs then it is an error - reset to default of zero.
                 */
                adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
index fc5ecee56ca8f18dd3fbd77bf68101087da0aeec..c3ddc944f1e95722b9db06a14d0dcb74df704484 100644 (file)
@@ -1690,7 +1690,7 @@ enum {
 #define IXGBE_MACC_FS        0x00040000
 #define IXGBE_MAC_RX2TX_LPBK 0x00000002
 
-/* Veto Bit definiton */
+/* Veto Bit definition */
 #define IXGBE_MMNGC_MNG_VETO  0x00000001
 
 /* LINKS Bit Masks */
@@ -2462,8 +2462,8 @@ struct ixgbe_hic_read_shadow_ram {
 
 struct ixgbe_hic_write_shadow_ram {
        union ixgbe_hic_hdr2 hdr;
-       u32 address;
-       u16 length;
+       __be32 address;
+       __be16 length;
        u16 pad2;
        u16 data;
        u16 pad3;
@@ -3067,6 +3067,8 @@ struct ixgbe_mac_operations {
        s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
        s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
        s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+       void (*disable_rx)(struct ixgbe_hw *hw);
+       void (*enable_rx)(struct ixgbe_hw *hw);
        void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
 
        /* DMA Coalescing */
@@ -3137,6 +3139,7 @@ struct ixgbe_mac_info {
        u8                              flags;
        u8                              san_mac_rar_index;
        struct ixgbe_thermal_sensor_data  thermal_sensor_data;
+       bool                            set_lben;
 };
 
 struct ixgbe_phy_info {
index 49395420c9b35ff2762d6dd584b9783146a8ee49..f5f948d08b436147ca20c076ad80622544cd11a3 100644 (file)
@@ -820,6 +820,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
        .init_thermal_sensor_thresh = NULL,
        .prot_autoc_read        = &prot_autoc_read_generic,
        .prot_autoc_write       = &prot_autoc_write_generic,
+       .enable_rx              = &ixgbe_enable_rx_generic,
+       .disable_rx             = &ixgbe_disable_rx_generic,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
index 50bf81908dd6839bc96f84a7e34e2e1c288af928..58a3155af7cd0215a8bf552973218859b59f1ed8 100644 (file)
@@ -557,6 +557,47 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
        return status;
 }
 
+/** ixgbe_disable_rx_x550 - Disable RX unit
+ *
+ *  Enables the Rx DMA unit for x550
+ **/
+static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
+{
+       u32 rxctrl, pfdtxgswc;
+       s32 status;
+       struct ixgbe_hic_disable_rxen fw_cmd;
+
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       if (rxctrl & IXGBE_RXCTRL_RXEN) {
+               pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+               if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+                       pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+                       IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+                       hw->mac.set_lben = true;
+               } else {
+                       hw->mac.set_lben = false;
+               }
+
+               fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
+               fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
+               fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+               fw_cmd.port_number = (u8)hw->bus.lan_id;
+
+               status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+                                       sizeof(struct ixgbe_hic_disable_rxen),
+                                       IXGBE_HI_COMMAND_TIMEOUT, true);
+
+               /* If we fail - disable RX using register write */
+               if (status) {
+                       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+                       if (rxctrl & IXGBE_RXCTRL_RXEN) {
+                               rxctrl &= ~IXGBE_RXCTRL_RXEN;
+                               IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+                       }
+               }
+       }
+}
+
 /** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
  *  @hw: pointer to hardware structure
  *
@@ -1306,8 +1347,8 @@ mac_reset_top:
  *  @enable: enable or disable switch for Ethertype anti-spoofing
  *  @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
  **/
-void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
-                                           int vf)
+static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
+                                                  bool enable, int vf)
 {
        int vf_target_reg = vf >> 3;
        int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
@@ -1366,6 +1407,8 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
        .init_thermal_sensor_thresh     = NULL, \
        .prot_autoc_read                = &prot_autoc_read_generic, \
        .prot_autoc_write               = &prot_autoc_write_generic, \
+       .enable_rx                      = &ixgbe_enable_rx_generic, \
+       .disable_rx                     = &ixgbe_disable_rx_x550, \
 
 static struct ixgbe_mac_operations mac_ops_X550 = {
        X550_COMMON_MAC
index 7412d378b77b95cb6032d4293086715002d72893..770e21a643880a9032cdc5b1e6460075acfd7250 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 #define _IXGBEVF_DEFINES_H_
 
 /* Device IDs */
-#define IXGBE_DEV_ID_82599_VF           0x10ED
-#define IXGBE_DEV_ID_X540_VF            0x1515
+#define IXGBE_DEV_ID_82599_VF          0x10ED
+#define IXGBE_DEV_ID_X540_VF           0x1515
 #define IXGBE_DEV_ID_X550_VF           0x1565
 #define IXGBE_DEV_ID_X550EM_X_VF       0x15A8
 
-#define IXGBE_VF_IRQ_CLEAR_MASK         7
-#define IXGBE_VF_MAX_TX_QUEUES          8
-#define IXGBE_VF_MAX_RX_QUEUES          8
+#define IXGBE_VF_IRQ_CLEAR_MASK                7
+#define IXGBE_VF_MAX_TX_QUEUES         8
+#define IXGBE_VF_MAX_RX_QUEUES         8
 
 /* DCB define */
 #define IXGBE_VF_MAX_TRAFFIC_CLASS     8
 
 /* Link speed */
 typedef u32 ixgbe_link_speed;
-#define IXGBE_LINK_SPEED_1GB_FULL       0x0020
-#define IXGBE_LINK_SPEED_10GB_FULL      0x0080
+#define IXGBE_LINK_SPEED_1GB_FULL      0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL     0x0080
 #define IXGBE_LINK_SPEED_100_FULL      0x0008
 
-#define IXGBE_CTRL_RST              0x04000000 /* Reset (SW) */
-#define IXGBE_RXDCTL_ENABLE         0x02000000 /* Enable specific Rx Queue */
-#define IXGBE_TXDCTL_ENABLE         0x02000000 /* Enable specific Tx Queue */
-#define IXGBE_LINKS_UP              0x40000000
-#define IXGBE_LINKS_SPEED_82599     0x30000000
-#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
-#define IXGBE_LINKS_SPEED_1G_82599  0x20000000
-#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_CTRL_RST         0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE    0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE    0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP         0x40000000
+#define IXGBE_LINKS_SPEED_82599                0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599    0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599     0x20000000
+#define IXGBE_LINKS_SPEED_100_82599    0x10000000
 
 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE  8
-#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE  8
-#define IXGBE_REQ_TX_BUFFER_GRANULARITY   1024
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE       8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE       8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY                1024
 
 /* Interrupt Vector Allocation Registers */
-#define IXGBE_IVAR_ALLOC_VAL    0x80 /* Interrupt Allocation valid */
+#define IXGBE_IVAR_ALLOC_VAL   0x80 /* Interrupt Allocation valid */
 
-#define IXGBE_VF_INIT_TIMEOUT   200 /* Number of retries to clear RSTI */
+#define IXGBE_VF_INIT_TIMEOUT  200 /* Number of retries to clear RSTI */
 
 /* Receive Config masks */
-#define IXGBE_RXCTRL_RXEN       0x00000001  /* Enable Receiver */
-#define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */
-#define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */
-#define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */
-#define IXGBE_RXDCTL_RLPMLMASK  0x00003FFF  /* Only supported on the X540 */
-#define IXGBE_RXDCTL_RLPML_EN   0x00008000
+#define IXGBE_RXCTRL_RXEN      0x00000001  /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS    0x00000002  /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE    0x02000000  /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_VME       0x40000000  /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF  /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN  0x00008000
 
 /* DCA Control */
 #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
 
 /* PSRTYPE bit definitions */
-#define IXGBE_PSRTYPE_TCPHDR    0x00000010
-#define IXGBE_PSRTYPE_UDPHDR    0x00000020
-#define IXGBE_PSRTYPE_IPV4HDR   0x00000100
-#define IXGBE_PSRTYPE_IPV6HDR   0x00000200
-#define IXGBE_PSRTYPE_L2HDR     0x00001000
+#define IXGBE_PSRTYPE_TCPHDR   0x00000010
+#define IXGBE_PSRTYPE_UDPHDR   0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR  0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR  0x00000200
+#define IXGBE_PSRTYPE_L2HDR    0x00001000
 
 /* SRRCTL bit definitions */
-#define IXGBE_SRRCTL_BSIZEPKT_SHIFT     10     /* so many KBs */
-#define IXGBE_SRRCTL_RDMTS_SHIFT        22
-#define IXGBE_SRRCTL_RDMTS_MASK         0x01C00000
-#define IXGBE_SRRCTL_DROP_EN            0x10000000
-#define IXGBE_SRRCTL_BSIZEPKT_MASK      0x0000007F
-#define IXGBE_SRRCTL_BSIZEHDR_MASK      0x00003F00
-#define IXGBE_SRRCTL_DESCTYPE_LEGACY    0x00000000
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT    10     /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT       22
+#define IXGBE_SRRCTL_RDMTS_MASK                0x01C00000
+#define IXGBE_SRRCTL_DROP_EN           0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK     0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK     0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY   0x00000000
 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
-#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT  0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT        0x04000000
 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
-#define IXGBE_SRRCTL_DESCTYPE_MASK      0x0E000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK     0x0E000000
 
 /* Receive Descriptor bit definitions */
-#define IXGBE_RXD_STAT_DD         0x01    /* Descriptor Done */
-#define IXGBE_RXD_STAT_EOP        0x02    /* End of Packet */
-#define IXGBE_RXD_STAT_FLM        0x04    /* FDir Match */
-#define IXGBE_RXD_STAT_VP         0x08    /* IEEE VLAN Packet */
-#define IXGBE_RXDADV_NEXTP_MASK   0x000FFFF0 /* Next Descriptor Index */
-#define IXGBE_RXDADV_NEXTP_SHIFT  0x00000004
-#define IXGBE_RXD_STAT_UDPCS      0x10    /* UDP xsum calculated */
-#define IXGBE_RXD_STAT_L4CS       0x20    /* L4 xsum calculated */
-#define IXGBE_RXD_STAT_IPCS       0x40    /* IP xsum calculated */
-#define IXGBE_RXD_STAT_PIF        0x80    /* passed in-exact filter */
-#define IXGBE_RXD_STAT_CRCV       0x100   /* Speculative CRC Valid */
-#define IXGBE_RXD_STAT_VEXT       0x200   /* 1st VLAN found */
-#define IXGBE_RXD_STAT_UDPV       0x400   /* Valid UDP checksum */
-#define IXGBE_RXD_STAT_DYNINT     0x800   /* Pkt caused INT via DYNINT */
-#define IXGBE_RXD_STAT_TS         0x10000 /* Time Stamp */
-#define IXGBE_RXD_STAT_SECP       0x20000 /* Security Processing */
-#define IXGBE_RXD_STAT_LB         0x40000 /* Loopback Status */
-#define IXGBE_RXD_STAT_ACK        0x8000  /* ACK Packet indication */
-#define IXGBE_RXD_ERR_CE          0x01    /* CRC Error */
-#define IXGBE_RXD_ERR_LE          0x02    /* Length Error */
-#define IXGBE_RXD_ERR_PE          0x08    /* Packet Error */
-#define IXGBE_RXD_ERR_OSE         0x10    /* Oversize Error */
-#define IXGBE_RXD_ERR_USE         0x20    /* Undersize Error */
-#define IXGBE_RXD_ERR_TCPE        0x40    /* TCP/UDP Checksum Error */
-#define IXGBE_RXD_ERR_IPE         0x80    /* IP Checksum Error */
-#define IXGBE_RXDADV_ERR_MASK     0xFFF00000 /* RDESC.ERRORS mask */
-#define IXGBE_RXDADV_ERR_SHIFT    20         /* RDESC.ERRORS shift */
-#define IXGBE_RXDADV_ERR_HBO      0x00800000 /*Header Buffer Overflow */
-#define IXGBE_RXDADV_ERR_CE       0x01000000 /* CRC Error */
-#define IXGBE_RXDADV_ERR_LE       0x02000000 /* Length Error */
-#define IXGBE_RXDADV_ERR_PE       0x08000000 /* Packet Error */
-#define IXGBE_RXDADV_ERR_OSE      0x10000000 /* Oversize Error */
-#define IXGBE_RXDADV_ERR_USE      0x20000000 /* Undersize Error */
-#define IXGBE_RXDADV_ERR_TCPE     0x40000000 /* TCP/UDP Checksum Error */
-#define IXGBE_RXDADV_ERR_IPE      0x80000000 /* IP Checksum Error */
-#define IXGBE_RXD_VLAN_ID_MASK    0x0FFF  /* VLAN ID is in lower 12 bits */
-#define IXGBE_RXD_PRI_MASK        0xE000  /* Priority is in upper 3 bits */
-#define IXGBE_RXD_PRI_SHIFT       13
-#define IXGBE_RXD_CFI_MASK        0x1000  /* CFI is bit 12 */
-#define IXGBE_RXD_CFI_SHIFT       12
-
-#define IXGBE_RXDADV_STAT_DD            IXGBE_RXD_STAT_DD  /* Done */
-#define IXGBE_RXDADV_STAT_EOP           IXGBE_RXD_STAT_EOP /* End of Packet */
-#define IXGBE_RXDADV_STAT_FLM           IXGBE_RXD_STAT_FLM /* FDir Match */
-#define IXGBE_RXDADV_STAT_VP            IXGBE_RXD_STAT_VP  /* IEEE VLAN Pkt */
-#define IXGBE_RXDADV_STAT_MASK          0x000FFFFF /* Stat/NEXTP: bit 0-19 */
-#define IXGBE_RXDADV_STAT_FCEOFS        0x00000040 /* FCoE EOF/SOF Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT        0x00000030 /* FCoE Pkt Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
-#define IXGBE_RXDADV_STAT_FCSTAT_NODDP  0x00000010 /* 01: Ctxt w/o DDP */
-#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
-#define IXGBE_RXDADV_STAT_FCSTAT_DDP    0x00000030 /* 11: Ctxt w/ DDP */
-
-#define IXGBE_RXDADV_RSSTYPE_MASK       0x0000000F
-#define IXGBE_RXDADV_PKTTYPE_MASK       0x0000FFF0
-#define IXGBE_RXDADV_PKTTYPE_MASK_EX    0x0001FFF0
-#define IXGBE_RXDADV_HDRBUFLEN_MASK     0x00007FE0
-#define IXGBE_RXDADV_RSCCNT_MASK        0x001E0000
-#define IXGBE_RXDADV_RSCCNT_SHIFT       17
-#define IXGBE_RXDADV_HDRBUFLEN_SHIFT    5
-#define IXGBE_RXDADV_SPLITHEADER_EN     0x00001000
-#define IXGBE_RXDADV_SPH                0x8000
+#define IXGBE_RXD_STAT_DD      0x01    /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP     0x02    /* End of Packet */
+#define IXGBE_RXD_STAT_FLM     0x04    /* FDir Match */
+#define IXGBE_RXD_STAT_VP      0x08    /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK        0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT       0x00000004
+#define IXGBE_RXD_STAT_UDPCS   0x10    /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS    0x20    /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS    0x40    /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF     0x80    /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV    0x100   /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT    0x200   /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV    0x400   /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT  0x800   /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_TS      0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP    0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB      0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK     0x8000  /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE       0x01    /* CRC Error */
+#define IXGBE_RXD_ERR_LE       0x02    /* Length Error */
+#define IXGBE_RXD_ERR_PE       0x08    /* Packet Error */
+#define IXGBE_RXD_ERR_OSE      0x10    /* Oversize Error */
+#define IXGBE_RXD_ERR_USE      0x20    /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE     0x40    /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE      0x80    /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK  0xFFF00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20         /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_HBO   0x00800000 /*Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE    0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE    0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE    0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE   0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE   0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE  0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE   0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK     0xE000  /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT    13
+#define IXGBE_RXD_CFI_MASK     0x1000  /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT    12
+
+#define IXGBE_RXDADV_STAT_DD           IXGBE_RXD_STAT_DD  /* Done */
+#define IXGBE_RXDADV_STAT_EOP          IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM          IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP           IXGBE_RXD_STAT_VP  /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK         0x000FFFFF /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS       0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT       0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH        0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP        0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP   0x00000030 /* 11: Ctxt w/ DDP */
+
+#define IXGBE_RXDADV_RSSTYPE_MASK      0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK      0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX   0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK    0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK       0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT      17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT   5
+#define IXGBE_RXDADV_SPLITHEADER_EN    0x00001000
+#define IXGBE_RXDADV_SPH               0x8000
 
 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
                                      IXGBE_RXD_ERR_CE |  \
@@ -176,16 +175,16 @@ typedef u32 ixgbe_link_speed;
                                         IXGBE_RXDADV_ERR_OSE | \
                                         IXGBE_RXDADV_ERR_USE)
 
-#define IXGBE_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
-#define IXGBE_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
-#define IXGBE_TXD_CMD_EOP    0x01000000 /* End of Packet */
-#define IXGBE_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
-#define IXGBE_TXD_CMD_IC     0x04000000 /* Insert Checksum */
-#define IXGBE_TXD_CMD_RS     0x08000000 /* Report Status */
-#define IXGBE_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
-#define IXGBE_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
-#define IXGBE_TXD_STAT_DD    0x00000001 /* Descriptor Done */
-#define IXGBE_TXD_CMD       (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
+#define IXGBE_TXD_POPTS_IXSM   0x01       /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM   0x02       /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP      0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS     0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC       0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS       0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT     0x20000000 /* Descriptor ext (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE      0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD      0x00000001 /* Descriptor Done */
+#define IXGBE_TXD_CMD          (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
 
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
@@ -241,44 +240,44 @@ struct ixgbe_adv_tx_context_desc {
 };
 
 /* Adv Transmit Descriptor Config Masks */
-#define IXGBE_ADVTXD_DTYP_MASK  0x00F00000 /* DTYP mask */
-#define IXGBE_ADVTXD_DTYP_CTXT  0x00200000 /* Advanced Context Desc */
-#define IXGBE_ADVTXD_DTYP_DATA  0x00300000 /* Advanced Data Descriptor */
-#define IXGBE_ADVTXD_DCMD_EOP   IXGBE_TXD_CMD_EOP  /* End of Packet */
-#define IXGBE_ADVTXD_DCMD_IFCS  IXGBE_TXD_CMD_IFCS /* Insert FCS */
-#define IXGBE_ADVTXD_DCMD_RS    IXGBE_TXD_CMD_RS   /* Report Status */
-#define IXGBE_ADVTXD_DCMD_DEXT  IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
-#define IXGBE_ADVTXD_DCMD_VLE   IXGBE_TXD_CMD_VLE  /* VLAN pkt enable */
-#define IXGBE_ADVTXD_DCMD_TSE   0x80000000 /* TCP Seg enable */
-#define IXGBE_ADVTXD_STAT_DD    IXGBE_TXD_STAT_DD  /* Descriptor Done */
-#define IXGBE_ADVTXD_TUCMD_IPV4      0x00000400  /* IP Packet Type: 1=IPv4 */
-#define IXGBE_ADVTXD_TUCMD_IPV6      0x00000000  /* IP Packet Type: 0=IPv6 */
-#define IXGBE_ADVTXD_TUCMD_L4T_UDP   0x00000000  /* L4 Packet TYPE of UDP */
-#define IXGBE_ADVTXD_TUCMD_L4T_TCP   0x00000800  /* L4 Packet TYPE of TCP */
-#define IXGBE_ADVTXD_TUCMD_L4T_SCTP  0x00001000  /* L4 Packet TYPE of SCTP */
-#define IXGBE_ADVTXD_IDX_SHIFT  4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP  IXGBE_TXD_CMD_EOP  /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS   IXGBE_TXD_CMD_RS   /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE  IXGBE_TXD_CMD_VLE  /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE  0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD   IXGBE_TXD_STAT_DD  /* Descriptor Done */
+#define IXGBE_ADVTXD_TUCMD_IPV4        0x00000400  /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6        0x00000000  /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP     0x00000000  /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP     0x00000800  /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP    0x00001000  /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
 #define IXGBE_ADVTXD_CC                0x00000080 /* Check Context */
-#define IXGBE_ADVTXD_POPTS_SHIFT      8  /* Adv desc POPTS shift */
-#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+#define IXGBE_ADVTXD_POPTS_SHIFT       8  /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM        (IXGBE_TXD_POPTS_IXSM << \
                                 IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+#define IXGBE_ADVTXD_POPTS_TXSM        (IXGBE_TXD_POPTS_TXSM << \
                                 IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
-#define IXGBE_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
-#define IXGBE_ADVTXD_VLAN_SHIFT      16  /* Adv ctxt vlan tag shift */
-#define IXGBE_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
-#define IXGBE_ADVTXD_MSS_SHIFT       16  /* Adv ctxt MSS shift */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT      14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT      9  /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT                16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_L4LEN_SHIFT       8  /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT         16 /* Adv ctxt MSS shift */
 
 /* Interrupt register bitmasks */
 
-#define IXGBE_EITR_CNT_WDIS     0x80000000
+#define IXGBE_EITR_CNT_WDIS    0x80000000
 #define IXGBE_MAX_EITR         0x00000FF8
 #define IXGBE_MIN_EITR         8
 
 /* Error Codes */
-#define IXGBE_ERR_INVALID_MAC_ADDR              -1
-#define IXGBE_ERR_RESET_FAILED                  -2
-#define IXGBE_ERR_INVALID_ARGUMENT              -3
+#define IXGBE_ERR_INVALID_MAC_ADDR     -1
+#define IXGBE_ERR_RESET_FAILED         -2
+#define IXGBE_ERR_INVALID_ARGUMENT     -3
 
 /* Transmit Config masks */
 #define IXGBE_TXDCTL_ENABLE            0x02000000 /* Ena specific Tx Queue */
index cc0e5b7ff041e58752dfa62728bb08eab5d11a5a..e83c85bf2602094271c8446bb1a644410a3c0c94 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -100,6 +99,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
        "Register test  (offline)",
        "Link test   (on/offline)"
 };
+
 #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
 
 static int ixgbevf_get_settings(struct net_device *netdev,
@@ -120,6 +120,7 @@ static int ixgbevf_get_settings(struct net_device *netdev,
 
        if (link_up) {
                __u32 speed = SPEED_10000;
+
                switch (link_speed) {
                case IXGBE_LINK_SPEED_10GB_FULL:
                        speed = SPEED_10000;
@@ -145,12 +146,14 @@ static int ixgbevf_get_settings(struct net_device *netdev,
 static u32 ixgbevf_get_msglevel(struct net_device *netdev)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
        return adapter->msg_enable;
 }
 
 static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
        adapter->msg_enable = data;
 }
 
@@ -185,7 +188,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
 
        /* Interrupt */
        /* don't read EICR because it can clear interrupt causes, instead
-        * read EICS which is a shadow but doesn't clear EICR */
+        * read EICS which is a shadow but doesn't clear EICR
+        */
        regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
        regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
        regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
@@ -390,21 +394,21 @@ clear_reset:
 
 static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
 {
-       switch (stringset) {
-       case ETH_SS_TEST:
-              return IXGBE_TEST_LEN;
-       case ETH_SS_STATS:
-              return IXGBE_GLOBAL_STATS_LEN;
-       default:
-              return -EINVAL;
-       }
+       switch (stringset) {
+       case ETH_SS_TEST:
+               return IXGBE_TEST_LEN;
+       case ETH_SS_STATS:
+               return IXGBE_GLOBAL_STATS_LEN;
+       default:
+               return -EINVAL;
+       }
 }
 
 static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                                      struct ethtool_stats *stats, u64 *data)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-       char *base = (char *) adapter;
+       char *base = (char *)adapter;
        int i;
 #ifdef BP_EXTENDED_STATS
        u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
@@ -594,8 +598,7 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
        }
        test = reg_test_vf;
 
-       /*
-        * Perform the register test, looping through the test table
+       /* Perform the register test, looping through the test table
         * until we either fail or reach the null entry.
         */
        while (test->reg) {
@@ -617,8 +620,8 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
                                break;
                        case WRITE_NO_TEST:
                                ixgbe_write_reg(&adapter->hw,
-                                                 test->reg + (i * 0x40),
-                                                 test->write);
+                                               test->reg + (i * 0x40),
+                                               test->write);
                                break;
                        case TABLE32_TEST:
                                b = reg_pattern_test(adapter, data,
@@ -670,7 +673,8 @@ static void ixgbevf_diag_test(struct net_device *netdev,
                hw_dbg(&adapter->hw, "offline testing starting\n");
 
                /* Link test performed before hardware reset so autoneg doesn't
-                * interfere with test result */
+                * interfere with test result
+                */
                if (ixgbevf_link_test(adapter, &data[1]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -724,7 +728,7 @@ static int ixgbevf_get_coalesce(struct net_device *netdev,
        else
                ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
 
-       /* if in mixed tx/rx queues per vector mode, report only rx settings */
+       /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
        if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
                return 0;
 
@@ -745,12 +749,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
        int num_vectors, i;
        u16 tx_itr_param, rx_itr_param;
 
-       /* don't accept tx specific changes if we've got mixed RxTx vectors */
-       if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
-           && ec->tx_coalesce_usecs)
+       /* don't accept Tx specific changes if we've got mixed RxTx vectors */
+       if (adapter->q_vector[0]->tx.count &&
+           adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
                return -EINVAL;
 
-
        if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
            (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
                return -EINVAL;
@@ -765,7 +768,6 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
        else
                rx_itr_param = adapter->rx_itr_setting;
 
-
        if (ec->tx_coalesce_usecs > 1)
                adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
        else
@@ -781,10 +783,10 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
        for (i = 0; i < num_vectors; i++) {
                q_vector = adapter->q_vector[i];
                if (q_vector->tx.count && !q_vector->rx.count)
-                       /* tx only */
+                       /* Tx only */
                        q_vector->itr = tx_itr_param;
                else
-                       /* rx only or mixed */
+                       /* Rx only or mixed */
                        q_vector->itr = rx_itr_param;
                ixgbevf_write_eitr(q_vector);
        }
@@ -793,22 +795,22 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
 }
 
 static const struct ethtool_ops ixgbevf_ethtool_ops = {
-       .get_settings           = ixgbevf_get_settings,
-       .get_drvinfo            = ixgbevf_get_drvinfo,
-       .get_regs_len           = ixgbevf_get_regs_len,
-       .get_regs               = ixgbevf_get_regs,
-       .nway_reset             = ixgbevf_nway_reset,
-       .get_link               = ethtool_op_get_link,
-       .get_ringparam          = ixgbevf_get_ringparam,
-       .set_ringparam          = ixgbevf_set_ringparam,
-       .get_msglevel           = ixgbevf_get_msglevel,
-       .set_msglevel           = ixgbevf_set_msglevel,
-       .self_test              = ixgbevf_diag_test,
-       .get_sset_count         = ixgbevf_get_sset_count,
-       .get_strings            = ixgbevf_get_strings,
-       .get_ethtool_stats      = ixgbevf_get_ethtool_stats,
-       .get_coalesce           = ixgbevf_get_coalesce,
-       .set_coalesce           = ixgbevf_set_coalesce,
+       .get_settings           = ixgbevf_get_settings,
+       .get_drvinfo            = ixgbevf_get_drvinfo,
+       .get_regs_len           = ixgbevf_get_regs_len,
+       .get_regs               = ixgbevf_get_regs,
+       .nway_reset             = ixgbevf_nway_reset,
+       .get_link               = ethtool_op_get_link,
+       .get_ringparam          = ixgbevf_get_ringparam,
+       .set_ringparam          = ixgbevf_set_ringparam,
+       .get_msglevel           = ixgbevf_get_msglevel,
+       .set_msglevel           = ixgbevf_set_msglevel,
+       .self_test              = ixgbevf_diag_test,
+       .get_sset_count         = ixgbevf_get_sset_count,
+       .get_strings            = ixgbevf_get_strings,
+       .get_ethtool_stats      = ixgbevf_get_ethtool_stats,
+       .get_coalesce           = ixgbevf_get_coalesce,
+       .set_coalesce           = ixgbevf_set_coalesce,
 };
 
 void ixgbevf_set_ethtool_ops(struct net_device *netdev)
index 3a9b356dff014b3be96d3fba3e42d52a91d042be..bc939a1fcb3cf0b073ebfb1a5873196e2eed3a1f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -51,7 +50,8 @@
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 
 /* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
 struct ixgbevf_tx_buffer {
        union ixgbe_adv_tx_desc *next_to_watch;
        unsigned long time_stamp;
@@ -132,9 +132,10 @@ struct ixgbevf_ring {
        u8 __iomem *tail;
        struct sk_buff *skb;
 
-       u16 reg_idx; /* holds the special value that gets the hardware register
-                     * offset associated with this ring, which is different
-                     * for DCB and RSS modes */
+       /* holds the special value that gets the hardware register offset
+        * associated with this ring, which is different for DCB and RSS modes
+        */
+       u16 reg_idx;
        int queue_index; /* needed for multiqueue queue management */
 };
 
@@ -143,21 +144,21 @@ struct ixgbevf_ring {
 
 #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
 #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
-#define IXGBEVF_MAX_RSS_QUEUES 2
+#define IXGBEVF_MAX_RSS_QUEUES 2
 
-#define IXGBEVF_DEFAULT_TXD   1024
-#define IXGBEVF_DEFAULT_RXD   512
-#define IXGBEVF_MAX_TXD       4096
-#define IXGBEVF_MIN_TXD       64
-#define IXGBEVF_MAX_RXD       4096
-#define IXGBEVF_MIN_RXD       64
+#define IXGBEVF_DEFAULT_TXD    1024
+#define IXGBEVF_DEFAULT_RXD    512
+#define IXGBEVF_MAX_TXD                4096
+#define IXGBEVF_MIN_TXD                64
+#define IXGBEVF_MAX_RXD                4096
+#define IXGBEVF_MIN_RXD                64
 
 /* Supported Rx Buffer Sizes */
-#define IXGBEVF_RXBUFFER_256   256    /* Used for packet split */
-#define IXGBEVF_RXBUFFER_2048  2048
+#define IXGBEVF_RXBUFFER_256   256    /* Used for packet split */
+#define IXGBEVF_RXBUFFER_2048  2048
 
-#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
-#define IXGBEVF_RX_BUFSZ    IXGBEVF_RXBUFFER_2048
+#define IXGBEVF_RX_HDR_SIZE    IXGBEVF_RXBUFFER_256
+#define IXGBEVF_RX_BUFSZ       IXGBEVF_RXBUFFER_2048
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
@@ -186,10 +187,11 @@ struct ixgbevf_ring_container {
  */
 struct ixgbevf_q_vector {
        struct ixgbevf_adapter *adapter;
-       u16 v_idx;              /* index of q_vector within array, also used for
-                                * finding the bit in EICR and friends that
-                                * represents the vector for this ring */
-       u16 itr;                /* Interrupt throttle rate written to EITR */
+       /* index of q_vector within array, also used for finding the bit in
+        * EICR and friends that represents the vector for this ring
+        */
+       u16 v_idx;
+       u16 itr; /* Interrupt throttle rate written to EITR */
        struct napi_struct napi;
        struct ixgbevf_ring_container rx, tx;
        char name[IFNAMSIZ + 9];
@@ -199,19 +201,21 @@ struct ixgbevf_q_vector {
 #define IXGBEVF_QV_STATE_NAPI          1    /* NAPI owns this QV */
 #define IXGBEVF_QV_STATE_POLL          2    /* poll owns this QV */
 #define IXGBEVF_QV_STATE_DISABLED      4    /* QV is disabled */
-#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
-#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_OWNED       (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED      (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
 #define IXGBEVF_QV_STATE_NAPI_YIELD    8    /* NAPI yielded this QV */
 #define IXGBEVF_QV_STATE_POLL_YIELD    16   /* poll yielded this QV */
-#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
-#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_YIELD       (IXGBEVF_QV_STATE_NAPI_YIELD | \
+                                IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND   (IXGBEVF_QV_STATE_POLL | \
+                                IXGBEVF_QV_STATE_POLL_YIELD)
        spinlock_t lock;
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 };
+
 #ifdef CONFIG_NET_RX_BUSY_POLL
 static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
 {
-
        spin_lock_init(&q_vector->lock);
        q_vector->state = IXGBEVF_QV_STATE_IDLE;
 }
@@ -220,6 +224,7 @@ static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
 static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
 {
        int rc = true;
+
        spin_lock_bh(&q_vector->lock);
        if (q_vector->state & IXGBEVF_QV_LOCKED) {
                WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
@@ -240,6 +245,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
 static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
 {
        int rc = false;
+
        spin_lock_bh(&q_vector->lock);
        WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
                                   IXGBEVF_QV_STATE_NAPI_YIELD));
@@ -256,6 +262,7 @@ static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
 static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
 {
        int rc = true;
+
        spin_lock_bh(&q_vector->lock);
        if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
                q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
@@ -275,6 +282,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
 static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
 {
        int rc = false;
+
        spin_lock_bh(&q_vector->lock);
        WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
 
@@ -297,6 +305,7 @@ static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
 static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
 {
        int rc = true;
+
        spin_lock_bh(&q_vector->lock);
        if (q_vector->state & IXGBEVF_QV_OWNED)
                rc = false;
@@ -307,8 +316,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
 
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
-/*
- * microsecond values for various ITR rates shifted by 2 to fit itr register
+/* microsecond values for various ITR rates shifted by 2 to fit itr register
  * with the first 3 bits reserved 0
  */
 #define IXGBE_MIN_RSC_ITR      24
@@ -345,22 +353,22 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
        writel(value, ring->tail);
 }
 
-#define IXGBEVF_RX_DESC(R, i)      \
+#define IXGBEVF_RX_DESC(R, i)  \
        (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
-#define IXGBEVF_TX_DESC(R, i)      \
+#define IXGBEVF_TX_DESC(R, i)  \
        (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
-#define IXGBEVF_TX_CTXTDESC(R, i)          \
+#define IXGBEVF_TX_CTXTDESC(R, i)      \
        (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
 
 #define IXGBE_MAX_JUMBO_FRAME_SIZE     9728 /* Maximum Supported Size 9.5KB */
 
-#define OTHER_VECTOR 1
-#define NON_Q_VECTORS (OTHER_VECTOR)
+#define OTHER_VECTOR   1
+#define NON_Q_VECTORS  (OTHER_VECTOR)
 
-#define MAX_MSIX_Q_VECTORS 2
+#define MAX_MSIX_Q_VECTORS     2
 
-#define MIN_MSIX_Q_VECTORS 1
-#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+#define MIN_MSIX_Q_VECTORS     1
+#define MIN_MSIX_COUNT         (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
 
 /* board specific private data structure */
 struct ixgbevf_adapter {
index 4186981e562d0c395825785aa07f32d15fab324f..4ee15adb3bd9a6f7b7ecfa76b3acc7f2a5b0885c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -25,7 +24,6 @@
 
 *******************************************************************************/
 
-
 /******************************************************************************
  Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
 ******************************************************************************/
@@ -170,12 +168,13 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
  * @queue: queue to map the corresponding interrupt to
  * @msix_vector: the vector to map to the corresponding queue
- */
+ **/
 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                             u8 queue, u8 msix_vector)
 {
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;
+
        if (direction == -1) {
                /* other causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -184,7 +183,7 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                ivar |= msix_vector;
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
        } else {
-               /* tx or rx causes */
+               /* Tx or Rx causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                index = ((16 * (queue & 1)) + (8 * direction));
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
@@ -458,11 +457,12 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
        napi_gro_receive(&q_vector->napi, skb);
 }
 
-/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+/**
+ * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
  * @ring: structure containig ring specific data
  * @rx_desc: current Rx descriptor being processed
  * @skb: skb currently being received and modified
- */
+ **/
 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
@@ -492,7 +492,8 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
+/**
+ * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
  * @rx_ring: rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
  * @skb: pointer to current skb being populated
@@ -500,7 +501,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
  * This function checks the ring, descriptor, and packet information in
  * order to populate the checksum, VLAN, protocol, and other fields within
  * the skb.
- */
+ **/
 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
@@ -647,7 +648,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
        }
 }
 
-/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
+/**
+ * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
  * @rx_ring: rx descriptor ring packet is being transacted on
  * @skb: pointer to current skb being adjusted
  *
@@ -657,7 +659,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
  * that allow for significant optimizations versus the standard function.
  * As a result we can do things like drop a frag and maintain an accurate
  * truesize for the skb.
- */
+ **/
 static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
                              struct sk_buff *skb)
 {
@@ -686,7 +688,8 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
        skb->tail += pull_len;
 }
 
-/* ixgbevf_cleanup_headers - Correct corrupted or empty headers
+/**
+ * ixgbevf_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
  * @skb: pointer to current skb being fixed
@@ -702,7 +705,7 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
  * it is large enough to qualify as a valid Ethernet frame.
  *
  * Returns true if an error was encountered and skb was freed.
- */
+ **/
 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
                                    union ixgbe_adv_rx_desc *rx_desc,
                                    struct sk_buff *skb)
@@ -729,12 +732,13 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
        return false;
 }
 
-/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
+/**
+ * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
  * @rx_ring: rx descriptor ring to store buffers on
  * @old_buff: donor buffer to have page reused
  *
  * Synchronizes page for reuse by the adapter
- */
+ **/
 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
                                  struct ixgbevf_rx_buffer *old_buff)
 {
@@ -764,7 +768,8 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
        return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
 }
 
-/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
+/**
+ * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: buffer containing page to add
  * @rx_desc: descriptor containing length of buffer written by hardware
@@ -777,7 +782,7 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
  *
  * The function will then update the page offset if necessary and return
  * true if the buffer can be reused by the adapter.
- */
+ **/
 static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
                                struct ixgbevf_rx_buffer *rx_buffer,
                                union ixgbe_adv_rx_desc *rx_desc,
@@ -958,7 +963,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                 * source pruning.
                 */
                if ((skb->pkt_type == PACKET_BROADCAST ||
-                   skb->pkt_type == PACKET_MULTICAST) &&
+                    skb->pkt_type == PACKET_MULTICAST) &&
                    ether_addr_equal(rx_ring->netdev->dev_addr,
                                     eth_hdr(skb)->h_source)) {
                        dev_kfree_skb_irq(skb);
@@ -1016,7 +1021,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 #endif
 
        /* attempt to distribute budget to each queue fairly, but don't allow
-        * the budget to go below 1 because we'll exit polling */
+        * the budget to go below 1 because we'll exit polling
+        */
        if (q_vector->rx.count > 1)
                per_ring_budget = max(budget/q_vector->rx.count, 1);
        else
@@ -1049,7 +1055,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 /**
  * ixgbevf_write_eitr - write VTEITR register in hardware specific way
  * @q_vector: structure containing interrupt and ring information
- */
+ **/
 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
 {
        struct ixgbevf_adapter *adapter = q_vector->adapter;
@@ -1057,8 +1063,7 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
        int v_idx = q_vector->v_idx;
        u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
 
-       /*
-        * set the WDIS bit to not clear the timer bits and cause an
+       /* set the WDIS bit to not clear the timer bits and cause an
         * immediate assertion of the interrupt
         */
        itr_reg |= IXGBE_EITR_CNT_WDIS;
@@ -1115,12 +1120,12 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        adapter->eims_enable_mask = 0;
 
-       /*
-        * Populate the IVAR table and set the ITR values to the
+       /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                struct ixgbevf_ring *ring;
+
                q_vector = adapter->q_vector[v_idx];
 
                ixgbevf_for_each_ring(ring, q_vector->rx)
@@ -1130,13 +1135,13 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
                        ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
 
                if (q_vector->tx.ring && !q_vector->rx.ring) {
-                       /* tx only vector */
+                       /* Tx only vector */
                        if (adapter->tx_itr_setting == 1)
                                q_vector->itr = IXGBE_10K_ITR;
                        else
                                q_vector->itr = adapter->tx_itr_setting;
                } else {
-                       /* rx or rx/tx vector */
+                       /* Rx or Rx/Tx vector */
                        if (adapter->rx_itr_setting == 1)
                                q_vector->itr = IXGBE_20K_ITR;
                        else
@@ -1167,13 +1172,13 @@ enum latency_range {
  * @q_vector: structure containing interrupt and ring information
  * @ring_container: structure containing ring performance data
  *
- *      Stores a new ITR value based on packets and byte
- *      counts during the last interrupt.  The advantage of per interrupt
- *      computation is faster updates and more accurate ITR for the current
- *      traffic pattern.  Constants in this function were computed
- *      based on theoretical maximum wire speed and thresholds were set based
- *      on testing data as well as attempting to minimize response time
- *      while increasing bulk throughput.
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt.  The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern.  Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
  **/
 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
                               struct ixgbevf_ring_container *ring_container)
@@ -1187,7 +1192,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
        if (packets == 0)
                return;
 
-       /* simple throttlerate management
+       /* simple throttle rate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
@@ -1330,8 +1335,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
 
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-       /*
-        * The ideal configuration...
+       /* The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
@@ -1343,8 +1347,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
                goto out;
        }
 
-       /*
-        * If we don't have enough vectors for a 1-to-1
+       /* If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
@@ -1406,8 +1409,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
                                  q_vector->name, q_vector);
                if (err) {
                        hw_dbg(&adapter->hw,
-                              "request_irq failed for MSIX interrupt "
-                              "Error: %d\n", err);
+                              "request_irq failed for MSIX interrupt Error: %d\n",
+                              err);
                        goto free_queue_irqs;
                }
        }
@@ -1415,8 +1418,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbevf_msix_other, 0, netdev->name, adapter);
        if (err) {
-               hw_dbg(&adapter->hw,
-                      "request_irq for msix_other failed: %d\n", err);
+               hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
+                      err);
                goto free_queue_irqs;
        }
 
@@ -1448,6 +1451,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
 
        for (i = 0; i < q_vectors; i++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
+
                q_vector->rx.ring = NULL;
                q_vector->tx.ring = NULL;
                q_vector->rx.count = 0;
@@ -1469,8 +1473,7 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
        err = ixgbevf_request_msix_irqs(adapter);
 
        if (err)
-               hw_dbg(&adapter->hw,
-                      "request_irq failed, Error %d\n", err);
+               hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
 
        return err;
 }
@@ -1659,7 +1662,7 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
        /* write value back with RXDCTL.ENABLE bit cleared */
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
 
-       /* the hardware may take up to 100us to really disable the rx queue */
+       /* the hardware may take up to 100us to really disable the Rx queue */
        do {
                udelay(10);
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
@@ -1786,7 +1789,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
        ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
 
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
-        * the Base and Length of the Rx Descriptor Ring */
+        * the Base and Length of the Rx Descriptor Ring
+        */
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
@@ -1858,14 +1862,14 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
 
        if (!netdev_uc_empty(netdev)) {
                struct netdev_hw_addr *ha;
+
                netdev_for_each_uc_addr(ha, netdev) {
                        hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
                        udelay(200);
                }
        } else {
-               /*
-                * If the list is empty then send message to PF driver to
-                * clear all macvlans on this VF.
+               /* If the list is empty then send message to PF driver to
+                * clear all MAC VLANs on this VF.
                 */
                hw->mac.ops.set_uc_addr(hw, 0, NULL);
        }
@@ -2184,7 +2188,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
        if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
                return; /* do nothing if already down */
 
-       /* disable all enabled rx queues */
+       /* disable all enabled Rx queues */
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
@@ -2406,8 +2410,7 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
        int err = 0;
        int vector, v_budget;
 
-       /*
-        * It's easy to be greedy for MSI-X vectors, but it really
+       /* It's easy to be greedy for MSI-X vectors, but it really
         * doesn't do us much good if we have a lot more vectors
         * than CPU's.  So let's be conservative and only ask for
         * (roughly) the same number of vectors as there are CPU's.
@@ -2418,7 +2421,8 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
        v_budget += NON_Q_VECTORS;
 
        /* A failure in MSI-X entry allocation isn't fatal, but it does
-        * mean we disable MSI-X capabilities of the adapter. */
+        * mean we disable MSI-X capabilities of the adapter.
+        */
        adapter->msix_entries = kcalloc(v_budget,
                                        sizeof(struct msix_entry), GFP_KERNEL);
        if (!adapter->msix_entries) {
@@ -2544,8 +2548,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
 
        err = ixgbevf_alloc_q_vectors(adapter);
        if (err) {
-               hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
-                      "vectors\n");
+               hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
                goto err_alloc_q_vectors;
        }
 
@@ -2555,8 +2558,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
                goto err_alloc_queues;
        }
 
-       hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
-              "Tx Queue count = %u\n",
+       hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
               (adapter->num_rx_queues > 1) ? "Enabled" :
               "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
 
@@ -2600,7 +2602,6 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
 
 /**
  * ixgbevf_sw_init - Initialize general software structures
- * (struct ixgbevf_adapter)
  * @adapter: board private structure to initialize
  *
  * ixgbevf_sw_init initializes the Adapter private data structure.
@@ -2615,7 +2616,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
        int err;
 
        /* PCI config space info */
-
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->revision_id = pdev->revision;
@@ -2686,8 +2686,8 @@ out:
        {                                                                \
                u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);   \
                u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);   \
-               u64 current_counter = (current_counter_msb << 32) |      \
-                       current_counter_lsb;                             \
+               u64 current_counter = (current_counter_msb << 32) |      \
+                       current_counter_lsb;                             \
                if (current_counter < last_counter)                      \
                        counter += 0x1000000000LL;                       \
                last_counter = current_counter;                          \
@@ -2758,14 +2758,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
        ixgbevf_reinit_locked(adapter);
 }
 
-/* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
- * @adapter - pointer to the device adapter structure
+/**
+ * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
+ * @adapter: pointer to the device adapter structure
  *
  * This function serves two purposes.  First it strobes the interrupt lines
  * in order to make certain interrupts are occurring.  Secondly it sets the
  * bits needed to check for TX hangs.  As a result we should immediately
  * determine if a hang has occurred.
- */
+ **/
 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
@@ -2783,7 +2784,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
                        set_check_for_tx_hang(adapter->tx_ring[i]);
        }
 
-       /* get one bit for every active tx/rx interrupt vector */
+       /* get one bit for every active Tx/Rx interrupt vector */
        for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
                struct ixgbevf_q_vector *qv = adapter->q_vector[i];
 
@@ -2797,7 +2798,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
 
 /**
  * ixgbevf_watchdog_update_link - update the link status
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
  **/
 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
 {
@@ -2825,7 +2826,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
 /**
  * ixgbevf_watchdog_link_is_up - update netif_carrier status and
  *                              print link up message
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
  **/
 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
 {
@@ -2850,7 +2851,7 @@ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
 /**
  * ixgbevf_watchdog_link_is_down - update netif_carrier status and
  *                                print link down message
- * @adapter - pointer to the adapter structure
+ * @adapter: pointer to the adapter structure
  **/
 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
 {
@@ -2956,7 +2957,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
 
 /**
  * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
- * @tx_ring:    tx descriptor ring (for a specific queue) to setup
+ * @tx_ring: Tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
@@ -2983,8 +2984,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
 err:
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
-       hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
-              "descriptor ring\n");
+       hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -3006,8 +3006,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
                err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
                if (!err)
                        continue;
-               hw_dbg(&adapter->hw,
-                      "Allocation for Tx Queue %u failed\n", i);
+               hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
                break;
        }
 
@@ -3016,7 +3015,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
 
 /**
  * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
- * @rx_ring:    rx descriptor ring (for a specific queue) to setup
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
@@ -3065,8 +3064,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
                err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
                if (!err)
                        continue;
-               hw_dbg(&adapter->hw,
-                      "Allocation for Rx Queue %u failed\n", i);
+               hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
                break;
        }
        return err;
@@ -3136,11 +3134,11 @@ static int ixgbevf_open(struct net_device *netdev)
        if (hw->adapter_stopped) {
                ixgbevf_reset(adapter);
                /* if adapter is still stopped then PF isn't up and
-                * the vf can't start. */
+                * the VF can't start.
+                */
                if (hw->adapter_stopped) {
                        err = IXGBE_ERR_MBX;
-                       pr_err("Unable to start - perhaps the PF Driver isn't "
-                              "up yet\n");
+                       pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
                        goto err_setup_reset;
                }
        }
@@ -3163,8 +3161,7 @@ static int ixgbevf_open(struct net_device *netdev)
 
        ixgbevf_configure(adapter);
 
-       /*
-        * Map the Tx/Rx rings to the vectors we were allotted.
+       /* Map the Tx/Rx rings to the vectors we were allotted.
         * if request_irq will be called in this function map_rings
         * must be called *before* up_complete
         */
@@ -3288,6 +3285,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 
        if (first->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
+
                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
@@ -3313,7 +3311,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
        *hdr_len += l4len;
        *hdr_len = skb_transport_offset(skb) + l4len;
 
-       /* update gso size and bytecount with header size */
+       /* update GSO size and bytecount with header size */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
        first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
@@ -3343,6 +3341,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 l4_hdr = 0;
+
                switch (first->protocol) {
                case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
@@ -3356,8 +3355,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
                default:
                        if (unlikely(net_ratelimit())) {
                                dev_warn(tx_ring->dev,
-                                "partial checksum but proto=%x!\n",
-                                first->protocol);
+                                        "partial checksum but proto=%x!\n",
+                                        first->protocol);
                        }
                        break;
                }
@@ -3380,8 +3379,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
                default:
                        if (unlikely(net_ratelimit())) {
                                dev_warn(tx_ring->dev,
-                                "partial checksum but l4 proto=%x!\n",
-                                l4_hdr);
+                                        "partial checksum but l4 proto=%x!\n",
+                                        l4_hdr);
                        }
                        break;
                }
@@ -3405,7 +3404,7 @@ static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
                                      IXGBE_ADVTXD_DCMD_IFCS |
                                      IXGBE_ADVTXD_DCMD_DEXT);
 
-       /* set HW vlan bit if vlan is present */
+       /* set HW VLAN bit if VLAN is present */
        if (tx_flags & IXGBE_TX_FLAGS_VLAN)
                cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 
@@ -3572,11 +3571,13 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
-        * but since that doesn't exist yet, just open code it. */
+        * but since that doesn't exist yet, just open code it.
+        */
        smp_mb();
 
        /* We need to check again in a case another CPU has just
-        * made room available. */
+        * made room available.
+        */
        if (likely(ixgbevf_desc_unused(tx_ring) < size))
                return -EBUSY;
 
@@ -3615,8 +3616,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
        tx_ring = adapter->tx_ring[skb->queue_mapping];
 
-       /*
-        * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+       /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
         *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
         *       + 2 desc gap to keep tail from touching head,
         *       + 1 desc for context descriptor,
@@ -3794,8 +3794,7 @@ static int ixgbevf_resume(struct pci_dev *pdev)
        u32 err;
 
        pci_restore_state(pdev);
-       /*
-        * pci_restore_state clears dev->state_saved so call
+       /* pci_restore_state clears dev->state_saved so call
         * pci_save_state to restore it.
         */
        pci_save_state(pdev);
@@ -3930,8 +3929,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        } else {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       dev_err(&pdev->dev, "No usable DMA "
-                               "configuration, aborting\n");
+                       dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                        goto err_dma;
                }
                pci_using_dac = 0;
@@ -3962,8 +3960,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->back = adapter;
        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
-       /*
-        * call save state here in standalone driver because it relies on
+       /* call save state here in standalone driver because it relies on
         * adapter struct to exist, and needs to call netdev_priv
         */
        pci_save_state(pdev);
@@ -3978,7 +3975,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        ixgbevf_assign_netdev_ops(netdev);
 
-       /* Setup hw api */
+       /* Setup HW API */
        memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
        hw->mac.type  = ii->mac;
 
@@ -3998,11 +3995,11 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        netdev->hw_features = NETIF_F_SG |
-                          NETIF_F_IP_CSUM |
-                          NETIF_F_IPV6_CSUM |
-                          NETIF_F_TSO |
-                          NETIF_F_TSO6 |
-                          NETIF_F_RXCSUM;
+                             NETIF_F_IP_CSUM |
+                             NETIF_F_IPV6_CSUM |
+                             NETIF_F_TSO |
+                             NETIF_F_TSO6 |
+                             NETIF_F_RXCSUM;
 
        netdev->features = netdev->hw_features |
                           NETIF_F_HW_VLAN_CTAG_TX |
@@ -4131,7 +4128,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
  *
  * This function is called after a PCI bus error affecting
  * this device has been detected.
- */
+ **/
 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
                                                  pci_channel_state_t state)
 {
@@ -4166,7 +4163,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
  *
  * Restart the card from scratch, as if from a cold-boot. Implementation
  * resembles the first-half of the ixgbevf_resume routine.
- */
+ **/
 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4194,7 +4191,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
  * This callback is called when the error recovery driver tells us that
  * its OK to resume normal operation. Implementation resembles the
  * second-half of the ixgbevf_resume routine.
- */
+ **/
 static void ixgbevf_io_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4214,17 +4211,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = {
 };
 
 static struct pci_driver ixgbevf_driver = {
-       .name     = ixgbevf_driver_name,
-       .id_table = ixgbevf_pci_tbl,
-       .probe    = ixgbevf_probe,
-       .remove   = ixgbevf_remove,
+       .name           = ixgbevf_driver_name,
+       .id_table       = ixgbevf_pci_tbl,
+       .probe          = ixgbevf_probe,
+       .remove         = ixgbevf_remove,
 #ifdef CONFIG_PM
        /* Power Management Hooks */
-       .suspend  = ixgbevf_suspend,
-       .resume   = ixgbevf_resume,
+       .suspend        = ixgbevf_suspend,
+       .resume         = ixgbevf_resume,
 #endif
-       .shutdown = ixgbevf_shutdown,
-       .err_handler = &ixgbevf_err_handler
+       .shutdown       = ixgbevf_shutdown,
+       .err_handler    = &ixgbevf_err_handler
 };
 
 /**
@@ -4236,6 +4233,7 @@ static struct pci_driver ixgbevf_driver = {
 static int __init ixgbevf_init_module(void)
 {
        int ret;
+
        pr_info("%s - version %s\n", ixgbevf_driver_string,
                ixgbevf_driver_version);
 
@@ -4266,6 +4264,7 @@ static void __exit ixgbevf_exit_module(void)
 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
 {
        struct ixgbevf_adapter *adapter = hw->back;
+
        return adapter->netdev->name;
 }
 
index d5028ddf4b318c5721d9f5b46ead7a76bb3b81b4..dc68fea4894b4dabab0f900f116d182c7a68c0e3 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -52,10 +51,10 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
 }
 
 /**
- *  ixgbevf_poll_for_ack - Wait for message acknowledgement
+ *  ixgbevf_poll_for_ack - Wait for message acknowledgment
  *  @hw: pointer to the HW structure
  *
- *  returns 0 if it successfully received a message acknowledgement
+ *  returns 0 if it successfully received a message acknowledgment
  **/
 static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
 {
@@ -213,7 +212,7 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
        s32 ret_val = IXGBE_ERR_MBX;
 
        if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
-                                        IXGBE_VFMAILBOX_RSTI))) {
+                                          IXGBE_VFMAILBOX_RSTI))) {
                ret_val = 0;
                hw->mbx.stats.rsts++;
        }
@@ -234,7 +233,7 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
        /* Take ownership of the buffer */
        IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
 
-       /* reserve mailbox for vf use */
+       /* reserve mailbox for VF use */
        if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
                ret_val = 0;
 
@@ -254,8 +253,7 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
        s32 ret_val;
        u16 i;
 
-
-       /* lock the mailbox to prevent pf/vf race condition */
+       /* lock the mailbox to prevent PF/VF race condition */
        ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
        if (ret_val)
                goto out_no_write;
@@ -279,7 +277,7 @@ out_no_write:
 }
 
 /**
- *  ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ *  ixgbevf_read_mbx_vf - Reads a message from the inbox intended for VF
  *  @hw: pointer to the HW structure
  *  @msg: The message buffer
  *  @size: Length of buffer
@@ -291,7 +289,7 @@ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
        s32 ret_val = 0;
        u16 i;
 
-       /* lock the mailbox to prevent pf/vf race condition */
+       /* lock the mailbox to prevent PF/VF race condition */
        ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
        if (ret_val)
                goto out_no_read;
@@ -311,17 +309,18 @@ out_no_read:
 }
 
 /**
- *  ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
+ *  ixgbevf_init_mbx_params_vf - set initial values for VF mailbox
  *  @hw: pointer to the HW structure
  *
- *  Initializes the hw->mbx struct to correct values for vf mailbox
+ *  Initializes the hw->mbx struct to correct values for VF mailbox
  */
 static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
 
        /* start mailbox as timed out and let the reset_hw call set the timeout
-        * value to begin communications */
+        * value to begin communications
+        */
        mbx->timeout = 0;
        mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
 
@@ -337,13 +336,13 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
 }
 
 const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
-       .init_params   = ixgbevf_init_mbx_params_vf,
-       .read          = ixgbevf_read_mbx_vf,
-       .write         = ixgbevf_write_mbx_vf,
-       .read_posted   = ixgbevf_read_posted_mbx,
-       .write_posted  = ixgbevf_write_posted_mbx,
-       .check_for_msg = ixgbevf_check_for_msg_vf,
-       .check_for_ack = ixgbevf_check_for_ack_vf,
-       .check_for_rst = ixgbevf_check_for_rst_vf,
+       .init_params    = ixgbevf_init_mbx_params_vf,
+       .read           = ixgbevf_read_mbx_vf,
+       .write          = ixgbevf_write_mbx_vf,
+       .read_posted    = ixgbevf_read_posted_mbx,
+       .write_posted   = ixgbevf_write_posted_mbx,
+       .check_for_msg  = ixgbevf_check_for_msg_vf,
+       .check_for_ack  = ixgbevf_check_for_ack_vf,
+       .check_for_rst  = ixgbevf_check_for_rst_vf,
 };
 
index 0bc30058ff82010c81bcc7c15906f2d962e0f4a8..6253e9335cae47bd18ab330f966820708c010d37 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
 #include "vf.h"
 
-#define IXGBE_VFMAILBOX_SIZE        16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX               -100
+#define IXGBE_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX          -100
 
-#define IXGBE_VFMAILBOX             0x002FC
-#define IXGBE_VFMBMEM               0x00200
+#define IXGBE_VFMAILBOX                0x002FC
+#define IXGBE_VFMBMEM          0x00200
 
 /* Define mailbox register bits */
-#define IXGBE_VFMAILBOX_REQ      0x00000001 /* Request for PF Ready bit */
-#define IXGBE_VFMAILBOX_ACK      0x00000002 /* Ack PF message received */
-#define IXGBE_VFMAILBOX_VFU      0x00000004 /* VF owns the mailbox buffer */
-#define IXGBE_VFMAILBOX_PFU      0x00000008 /* PF owns the mailbox buffer */
-#define IXGBE_VFMAILBOX_PFSTS    0x00000010 /* PF wrote a message in the MB */
-#define IXGBE_VFMAILBOX_PFACK    0x00000020 /* PF ack the previous VF msg */
-#define IXGBE_VFMAILBOX_RSTI     0x00000040 /* PF has reset indication */
-#define IXGBE_VFMAILBOX_RSTD     0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_REQ    0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK    0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU    0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU    0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS  0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK  0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI   0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD   0x00000080 /* PF has indicated reset done */
 #define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
 
-#define IXGBE_PFMAILBOX(x)          (0x04B00 + (4 * (x)))
-#define IXGBE_PFMBMEM(vfn)          (0x13000 + (64 * (vfn)))
+#define IXGBE_PFMAILBOX(x)     (0x04B00 + (4 * (x)))
+#define IXGBE_PFMBMEM(vfn)     (0x13000 + (64 * (vfn)))
 
-#define IXGBE_PFMAILBOX_STS   0x00000001 /* Initiate message send to VF */
-#define IXGBE_PFMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
-#define IXGBE_PFMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */
+#define IXGBE_PFMAILBOX_STS    0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK    0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU    0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU    0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU   0x00000010 /* Reset VFU - used when VF stuck */
 
 #define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
-#define IXGBE_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFREQ_VF1        0x00000001 /* bit for VF 1 message */
 #define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
-#define IXGBE_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */
-
+#define IXGBE_MBVFICR_VFACK_VF1        0x00010000 /* bit for VF 1 ack */
 
 /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
  * PF.  The reverse is true if it is IXGBE_PF_*.
  * Message ACK's are the value or'd with 0xF0000000
  */
-#define IXGBE_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
-                                              * this are the ACK */
-#define IXGBE_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
-                                              * this are the NACK */
-#define IXGBE_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
-                                              *  clear to send requests */
-#define IXGBE_VT_MSGINFO_SHIFT    16
+/* Messages below or'd with this are the ACK */
+#define IXGBE_VT_MSGTYPE_ACK   0x80000000
+/* Messages below or'd with this are the NACK */
+#define IXGBE_VT_MSGTYPE_NACK  0x40000000
+/* Indicates that VF is still clear to send requests */
+#define IXGBE_VT_MSGTYPE_CTS   0x20000000
+#define IXGBE_VT_MSGINFO_SHIFT 16
 /* bits 23:16 are used for exra info for certain messages */
-#define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+#define IXGBE_VT_MSGINFO_MASK  (0xFF << IXGBE_VT_MSGINFO_SHIFT)
 
 /* definitions to support mailbox API version negotiation */
 
-/*
- * each element denotes a version of the API; existing numbers may not
+/* each element denotes a version of the API; existing numbers may not
  * change; any additions must go at the end
  */
 enum ixgbe_pfvf_api_rev {
@@ -91,10 +88,10 @@ enum ixgbe_pfvf_api_rev {
 };
 
 /* mailbox API, legacy requests */
-#define IXGBE_VF_RESET            0x01 /* VF requests reset */
-#define IXGBE_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
-#define IXGBE_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
-#define IXGBE_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_RESET         0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR  0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN      0x04 /* VF requests PF to set VLAN */
 
 /* mailbox API, version 1.0 VF requests */
 #define IXGBE_VF_SET_LPE       0x05 /* VF requests PF to set VMOLR.LPE */
@@ -105,20 +102,20 @@ enum ixgbe_pfvf_api_rev {
 #define IXGBE_VF_GET_QUEUE     0x09 /* get queue configuration */
 
 /* GET_QUEUES return data indices within the mailbox */
-#define IXGBE_VF_TX_QUEUES     1       /* number of Tx queues supported */
-#define IXGBE_VF_RX_QUEUES     2       /* number of Rx queues supported */
-#define IXGBE_VF_TRANS_VLAN    3       /* Indication of port vlan */
-#define IXGBE_VF_DEF_QUEUE     4       /* Default queue offset */
+#define IXGBE_VF_TX_QUEUES     1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES     2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN    3 /* Indication of port VLAN */
+#define IXGBE_VF_DEF_QUEUE     4 /* Default queue offset */
 
 /* length of permanent address message returned from PF */
-#define IXGBE_VF_PERMADDR_MSG_LEN 4
+#define IXGBE_VF_PERMADDR_MSG_LEN      4
 /* word in permanent address message with the current multicast type */
-#define IXGBE_VF_MC_TYPE_WORD     3
+#define IXGBE_VF_MC_TYPE_WORD          3
 
-#define IXGBE_PF_CONTROL_MSG      0x0100 /* PF control message */
+#define IXGBE_PF_CONTROL_MSG           0x0100 /* PF control message */
 
-#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
-#define IXGBE_VF_MBX_INIT_DELAY   500  /* microseconds between retries */
+#define IXGBE_VF_MBX_INIT_TIMEOUT      2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY                500  /* microseconds between retries */
 
 /* forward declaration of the HW struct */
 struct ixgbe_hw;
index 3e712fd6e695e5b83ec1a45951968548314f63bf..2764fd16261ffef7e97608347b77941b2a15c5d5 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 #ifndef _IXGBEVF_REGS_H_
 #define _IXGBEVF_REGS_H_
 
-#define IXGBE_VFCTRL           0x00000
-#define IXGBE_VFSTATUS         0x00008
-#define IXGBE_VFLINKS          0x00010
-#define IXGBE_VFFRTIMER        0x00048
-#define IXGBE_VFRXMEMWRAP      0x03190
-#define IXGBE_VTEICR           0x00100
-#define IXGBE_VTEICS           0x00104
-#define IXGBE_VTEIMS           0x00108
-#define IXGBE_VTEIMC           0x0010C
-#define IXGBE_VTEIAC           0x00110
-#define IXGBE_VTEIAM           0x00114
-#define IXGBE_VTEITR(x)        (0x00820 + (4 * (x)))
-#define IXGBE_VTIVAR(x)        (0x00120 + (4 * (x)))
-#define IXGBE_VTIVAR_MISC      0x00140
-#define IXGBE_VTRSCINT(x)      (0x00180 + (4 * (x)))
-#define IXGBE_VFRDBAL(x)       (0x01000 + (0x40 * (x)))
-#define IXGBE_VFRDBAH(x)       (0x01004 + (0x40 * (x)))
-#define IXGBE_VFRDLEN(x)       (0x01008 + (0x40 * (x)))
-#define IXGBE_VFRDH(x)         (0x01010 + (0x40 * (x)))
-#define IXGBE_VFRDT(x)         (0x01018 + (0x40 * (x)))
-#define IXGBE_VFRXDCTL(x)      (0x01028 + (0x40 * (x)))
-#define IXGBE_VFSRRCTL(x)      (0x01014 + (0x40 * (x)))
-#define IXGBE_VFRSCCTL(x)      (0x0102C + (0x40 * (x)))
-#define IXGBE_VFPSRTYPE        0x00300
-#define IXGBE_VFTDBAL(x)       (0x02000 + (0x40 * (x)))
-#define IXGBE_VFTDBAH(x)       (0x02004 + (0x40 * (x)))
-#define IXGBE_VFTDLEN(x)       (0x02008 + (0x40 * (x)))
-#define IXGBE_VFTDH(x)         (0x02010 + (0x40 * (x)))
-#define IXGBE_VFTDT(x)         (0x02018 + (0x40 * (x)))
-#define IXGBE_VFTXDCTL(x)      (0x02028 + (0x40 * (x)))
-#define IXGBE_VFTDWBAL(x)      (0x02038 + (0x40 * (x)))
-#define IXGBE_VFTDWBAH(x)      (0x0203C + (0x40 * (x)))
-#define IXGBE_VFDCA_RXCTRL(x)  (0x0100C + (0x40 * (x)))
-#define IXGBE_VFDCA_TXCTRL(x)  (0x0200c + (0x40 * (x)))
-#define IXGBE_VFGPRC           0x0101C
-#define IXGBE_VFGPTC           0x0201C
-#define IXGBE_VFGORC_LSB       0x01020
-#define IXGBE_VFGORC_MSB       0x01024
-#define IXGBE_VFGOTC_LSB       0x02020
-#define IXGBE_VFGOTC_MSB       0x02024
-#define IXGBE_VFMPRC           0x01034
-#define IXGBE_VFMRQC           0x3000
-#define IXGBE_VFRSSRK(x)       (0x3100 + ((x) * 4))
-#define IXGBE_VFRETA(x)        (0x3200 + ((x) * 4))
+#define IXGBE_VFCTRL           0x00000
+#define IXGBE_VFSTATUS         0x00008
+#define IXGBE_VFLINKS          0x00010
+#define IXGBE_VFFRTIMER                0x00048
+#define IXGBE_VFRXMEMWRAP      0x03190
+#define IXGBE_VTEICR           0x00100
+#define IXGBE_VTEICS           0x00104
+#define IXGBE_VTEIMS           0x00108
+#define IXGBE_VTEIMC           0x0010C
+#define IXGBE_VTEIAC           0x00110
+#define IXGBE_VTEIAM           0x00114
+#define IXGBE_VTEITR(x)                (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x)                (0x00120 + (4 * (x)))
+#define IXGBE_VTIVAR_MISC      0x00140
+#define IXGBE_VTRSCINT(x)      (0x00180 + (4 * (x)))
+#define IXGBE_VFRDBAL(x)       (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x)       (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x)       (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x)         (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x)         (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x)      (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x)      (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x)      (0x0102C + (0x40 * (x)))
+#define IXGBE_VFPSRTYPE                0x00300
+#define IXGBE_VFTDBAL(x)       (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x)       (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x)       (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x)         (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x)         (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x)      (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x)      (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x)      (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x)  (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x)  (0x0200c + (0x40 * (x)))
+#define IXGBE_VFGPRC           0x0101C
+#define IXGBE_VFGPTC           0x0201C
+#define IXGBE_VFGORC_LSB       0x01020
+#define IXGBE_VFGORC_MSB       0x01024
+#define IXGBE_VFGOTC_LSB       0x02020
+#define IXGBE_VFGOTC_MSB       0x02024
+#define IXGBE_VFMPRC           0x01034
+#define IXGBE_VFMRQC           0x3000
+#define IXGBE_VFRSSRK(x)       (0x3100 + ((x) * 4))
+#define IXGBE_VFRETA(x)                (0x3200 + ((x) * 4))
 
 /* VFMRQC bits */
-#define IXGBE_VFMRQC_RSSEN              0x00000001  /* RSS Enable */
-#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000
-#define IXGBE_VFMRQC_RSS_FIELD_IPV4     0x00020000
-#define IXGBE_VFMRQC_RSS_FIELD_IPV6     0x00100000
-#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IXGBE_VFMRQC_RSSEN             0x00000001  /* RSS Enable */
+#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP        0x00010000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV4    0x00020000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV6    0x00100000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP        0x00200000
 
-#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
+#define IXGBE_WRITE_FLUSH(a)   (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
 
 #endif /* _IXGBEVF_REGS_H_ */
index cdb53be7d9958e4cb0f92456f459eacea122e038..2614fd328e47d8a3e8380429d010a3a452b13375 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -65,7 +64,7 @@ static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
  *  ixgbevf_reset_hw_vf - Performs hardware reset
  *  @hw: pointer to hardware structure
  *
- *  Resets the hardware by reseting the transmit and receive units, masks and
+ *  Resets the hardware by resetting the transmit and receive units, masks and
  *  clears all interrupts.
  **/
 static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
@@ -102,9 +101,10 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
 
        mdelay(10);
 
-       /* set our "perm_addr" based on info provided by PF */
-       /* also set up the mc_filter_type which is piggy backed
-        * on the mac address in word 3 */
+       /* set our "perm_addr" based on info provided by PF
+        * also set up the mc_filter_type which is piggy backed
+        * on the mac address in word 3
+        */
        ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
        if (ret_val)
                return ret_val;
@@ -117,7 +117,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
            msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
                return IXGBE_ERR_INVALID_MAC_ADDR;
 
-       memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
+       ether_addr_copy(hw->mac.perm_addr, addr);
        hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
 
        return 0;
@@ -138,8 +138,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
        u32 reg_val;
        u16 i;
 
-       /*
-        * Set the adapter_stopped flag so other driver functions stop touching
+       /* Set the adapter_stopped flag so other driver functions stop touching
         * the hardware
         */
        hw->adapter_stopped = true;
@@ -182,7 +181,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
  *
  *  Extracts the 12 bits, from a multicast address, to determine which
  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
- *  incoming rx multicast addresses, to determine the bit-vector to check in
+ *  incoming Rx multicast addresses, to determine the bit-vector to check in
  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
  *  to mc_filter_type.
@@ -220,7 +219,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
  **/
 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
 {
-       memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+       ether_addr_copy(mac_addr, hw->mac.perm_addr);
 
        return 0;
 }
@@ -233,8 +232,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
        s32 ret_val;
 
        memset(msgbuf, 0, sizeof(msgbuf));
-       /*
-        * If index is one then this is the start of a new list and needs
+       /* If index is one then this is the start of a new list and needs
         * indication to the PF so it can do it's own list management.
         * If it is zero then that tells the PF to just clear all of
         * this VF's macvlans and there is no new list.
@@ -242,7 +240,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
        msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
        msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
        if (addr)
-               memcpy(msg_addr, addr, ETH_ALEN);
+               ether_addr_copy(msg_addr, addr);
        ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
        if (!ret_val)
@@ -275,7 +273,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 
        memset(msgbuf, 0, sizeof(msgbuf));
        msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
-       memcpy(msg_addr, addr, ETH_ALEN);
+       ether_addr_copy(msg_addr, addr);
        ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
        if (!ret_val)
@@ -292,7 +290,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 }
 
 static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
-                                       u32 *msg, u16 size)
+                                      u32 *msg, u16 size)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        u32 retmsg[IXGBE_VFMAILBOX_SIZE];
@@ -348,7 +346,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 }
 
 /**
- *  ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
+ *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
  *  @hw: pointer to the HW structure
  *  @vlan: 12 bit VLAN ID
  *  @vind: unused by VF drivers
@@ -462,7 +460,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
        }
 
        /* if the read failed it could just be a mailbox collision, best wait
-        * until we are called again and don't report an error */
+        * until we are called again and don't report an error
+        */
        if (mbx->ops.read(hw, &in_msg, 1))
                goto out;
 
@@ -480,7 +479,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
        }
 
        /* if we passed all the tests above then the link is up and we no
-        * longer need to check for link */
+        * longer need to check for link
+        */
        mac->get_link_status = false;
 
 out:
@@ -561,8 +561,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
        if (!err) {
                msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 
-               /*
-                * if we we didn't get an ACK there must have been
+               /* if we we didn't get an ACK there must have been
                 * some sort of mailbox error so we should treat it
                 * as such
                 */
@@ -595,17 +594,17 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 }
 
 static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
-       .init_hw             = ixgbevf_init_hw_vf,
-       .reset_hw            = ixgbevf_reset_hw_vf,
-       .start_hw            = ixgbevf_start_hw_vf,
-       .get_mac_addr        = ixgbevf_get_mac_addr_vf,
-       .stop_adapter        = ixgbevf_stop_hw_vf,
-       .setup_link          = ixgbevf_setup_mac_link_vf,
-       .check_link          = ixgbevf_check_mac_link_vf,
-       .set_rar             = ixgbevf_set_rar_vf,
-       .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
-       .set_uc_addr         = ixgbevf_set_uc_addr_vf,
-       .set_vfta            = ixgbevf_set_vfta_vf,
+       .init_hw                = ixgbevf_init_hw_vf,
+       .reset_hw               = ixgbevf_reset_hw_vf,
+       .start_hw               = ixgbevf_start_hw_vf,
+       .get_mac_addr           = ixgbevf_get_mac_addr_vf,
+       .stop_adapter           = ixgbevf_stop_hw_vf,
+       .setup_link             = ixgbevf_setup_mac_link_vf,
+       .check_link             = ixgbevf_check_mac_link_vf,
+       .set_rar                = ixgbevf_set_rar_vf,
+       .update_mc_addr_list    = ixgbevf_update_mc_addr_list_vf,
+       .set_uc_addr            = ixgbevf_set_uc_addr_vf,
+       .set_vfta               = ixgbevf_set_vfta_vf,
 };
 
 const struct ixgbevf_info ixgbevf_82599_vf_info = {
index 5b172427f459a1c30070fdf52856ca2f0116bedb..6688250da7a12afc41a7570e0121e477fce2d621 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -169,7 +168,7 @@ struct ixgbevf_hw_stats {
 };
 
 struct ixgbevf_info {
-       enum ixgbe_mac_type             mac;
+       enum ixgbe_mac_type mac;
        const struct ixgbe_mac_operations *mac_ops;
 };
 
@@ -185,23 +184,26 @@ static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
                return;
        writel(value, reg_addr + reg);
 }
+
 #define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v)
 
 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg);
 #define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r)
 
 static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg,
-                                         u32 offset, u32 value)
+                                        u32 offset, u32 value)
 {
        ixgbe_write_reg(hw, reg + (offset << 2), value);
 }
+
 #define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v)
 
 static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
-                                       u32 offset)
+                                      u32 offset)
 {
        return ixgbevf_read_reg(hw, reg + (offset << 2));
 }
+
 #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
 
 void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
@@ -209,4 +211,3 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
                       unsigned int *default_tc);
 #endif /* __IXGBE_VF_H__ */
-
index 96208f17bb53be6240ae9e8d0cd30cc09041163e..ce5f7f9cff060868db1ad0ba3bde378d65222793 100644 (file)
 #define MVNETA_TXQ_CMD                           0x2448
 #define      MVNETA_TXQ_DISABLE_SHIFT            8
 #define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
+#define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
+#define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
 #define MVNETA_ACC_MODE                          0x2500
 #define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
 #define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 #define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
 #define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
 #define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
+#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
 
 #define MVNETA_INTR_OLD_CAUSE                    0x25a8
 #define MVNETA_INTR_OLD_MASK                     0x25ac
 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 #define MVNETA_GMAC_CTRL_2                       0x2c08
+#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
 #define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
 #define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
 #define      MVNETA_GMAC2_PORT_RESET             BIT(6)
 #define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 #define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
+#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
 #define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
+#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 #define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 #define MVNETA_MIB_COUNTERS_BASE                 0x3080
@@ -304,6 +310,7 @@ struct mvneta_port {
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
+       int use_inband_status:1;
 };
 
 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -994,6 +1001,20 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
        val &= ~MVNETA_PHY_POLLING_ENABLE;
        mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
 
+       if (pp->use_inband_status) {
+               val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+               val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
+                        MVNETA_GMAC_FORCE_LINK_DOWN |
+                        MVNETA_GMAC_AN_FLOW_CTRL_EN);
+               val |= MVNETA_GMAC_INBAND_AN_ENABLE |
+                      MVNETA_GMAC_AN_SPEED_EN |
+                      MVNETA_GMAC_AN_DUPLEX_EN;
+               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+               val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+               val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+               mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
+       }
+
        mvneta_set_ucast_table(pp, -1);
        mvneta_set_special_mcast_table(pp, -1);
        mvneta_set_other_mcast_table(pp, -1);
@@ -2043,6 +2064,28 @@ static irqreturn_t mvneta_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static int mvneta_fixed_link_update(struct mvneta_port *pp,
+                                   struct phy_device *phy)
+{
+       struct fixed_phy_status status;
+       struct fixed_phy_status changed = {};
+       u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
+
+       status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
+       if (gmac_stat & MVNETA_GMAC_SPEED_1000)
+               status.speed = SPEED_1000;
+       else if (gmac_stat & MVNETA_GMAC_SPEED_100)
+               status.speed = SPEED_100;
+       else
+               status.speed = SPEED_10;
+       status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
+       changed.link = 1;
+       changed.speed = 1;
+       changed.duplex = 1;
+       fixed_phy_update_state(phy, &status, &changed);
+       return 0;
+}
+
 /* NAPI handler
  * Bits 0 - 7 of the causeRxTx register indicate that are transmitted
  * packets on the corresponding TXQ (Bit 0 is for TX queue 1).
@@ -2063,8 +2106,18 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
        }
 
        /* Read cause register */
-       cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
-               (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+       cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
+       if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
+               u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
+
+               mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+               if (pp->use_inband_status && (cause_misc &
+                               (MVNETA_CAUSE_PHY_STATUS_CHANGE |
+                                MVNETA_CAUSE_LINK_CHANGE |
+                                MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
+                       mvneta_fixed_link_update(pp, pp->phy_dev);
+               }
+       }
 
        /* Release Tx descriptors */
        if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
@@ -2109,7 +2162,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
                napi_complete(napi);
                local_irq_save(flags);
                mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-                           MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+                           MVNETA_RX_INTR_MASK(rxq_number) |
+                           MVNETA_TX_INTR_MASK(txq_number) |
+                           MVNETA_MISCINTR_INTR_MASK);
                local_irq_restore(flags);
        }
 
@@ -2373,7 +2428,13 @@ static void mvneta_start_dev(struct mvneta_port *pp)
 
        /* Unmask interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-                   MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+                   MVNETA_RX_INTR_MASK(rxq_number) |
+                   MVNETA_TX_INTR_MASK(txq_number) |
+                   MVNETA_MISCINTR_INTR_MASK);
+       mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+                   MVNETA_CAUSE_PHY_STATUS_CHANGE |
+                   MVNETA_CAUSE_LINK_CHANGE |
+                   MVNETA_CAUSE_PSC_SYNC_CHANGE);
 
        phy_start(pp->phy_dev);
        netif_tx_start_all_queues(pp->dev);
@@ -2523,9 +2584,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
                        val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                        val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
                                 MVNETA_GMAC_CONFIG_GMII_SPEED |
-                                MVNETA_GMAC_CONFIG_FULL_DUPLEX |
-                                MVNETA_GMAC_AN_SPEED_EN |
-                                MVNETA_GMAC_AN_DUPLEX_EN);
+                                MVNETA_GMAC_CONFIG_FULL_DUPLEX);
 
                        if (phydev->duplex)
                                val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
@@ -2554,12 +2613,24 @@ static void mvneta_adjust_link(struct net_device *ndev)
 
        if (status_change) {
                if (phydev->link) {
-                       u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-                       val |= (MVNETA_GMAC_FORCE_LINK_PASS |
-                               MVNETA_GMAC_FORCE_LINK_DOWN);
-                       mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+                       if (!pp->use_inband_status) {
+                               u32 val = mvreg_read(pp,
+                                                 MVNETA_GMAC_AUTONEG_CONFIG);
+                               val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+                               val |= MVNETA_GMAC_FORCE_LINK_PASS;
+                               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+                                           val);
+                       }
                        mvneta_port_up(pp);
                } else {
+                       if (!pp->use_inband_status) {
+                               u32 val = mvreg_read(pp,
+                                                 MVNETA_GMAC_AUTONEG_CONFIG);
+                               val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+                               val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+                               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+                                           val);
+                       }
                        mvneta_port_down(pp);
                }
                phy_print_status(phydev);
@@ -2658,16 +2729,11 @@ static int mvneta_stop(struct net_device *dev)
 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        struct mvneta_port *pp = netdev_priv(dev);
-       int ret;
 
        if (!pp->phy_dev)
                return -ENOTSUPP;
 
-       ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
-       if (!ret)
-               mvneta_adjust_link(dev);
-
-       return ret;
+       return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
 }
 
 /* Ethtool methods */
@@ -2910,6 +2976,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
                return -EINVAL;
        }
 
+       if (pp->use_inband_status)
+               ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE;
+
        /* Cancel Port Reset */
        ctrl &= ~MVNETA_GMAC2_PORT_RESET;
        mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
@@ -2934,6 +3003,7 @@ static int mvneta_probe(struct platform_device *pdev)
        char hw_mac_addr[ETH_ALEN];
        const char *mac_from;
        int phy_mode;
+       int fixed_phy = 0;
        int err;
 
        /* Our multiqueue support is not complete, so for now, only
@@ -2967,6 +3037,7 @@ static int mvneta_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "cannot register fixed PHY\n");
                        goto err_free_irq;
                }
+               fixed_phy = 1;
 
                /* In the case of a fixed PHY, the DT node associated
                 * to the PHY is the Ethernet MAC DT node.
@@ -2990,6 +3061,8 @@ static int mvneta_probe(struct platform_device *pdev)
        pp = netdev_priv(dev);
        pp->phy_node = phy_node;
        pp->phy_interface = phy_mode;
+       pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
+                               fixed_phy;
 
        pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
@@ -3067,6 +3140,12 @@ static int mvneta_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, pp->dev);
 
+       if (pp->use_inband_status) {
+               struct phy_device *phy = of_phy_find_device(dn);
+
+               mvneta_fixed_link_update(pp, phy);
+       }
+
        return 0;
 
 err_free_stats:
index fdf3e382e4649313b677fa8c164e1d2430130f3b..3e8b1bfb1f2e316212bd9b60fa06522ca4dc68db 100644 (file)
@@ -1423,7 +1423,7 @@ static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
 {
        struct mvpp2_prs_entry pe;
 
-       /* Promiscous mode - Accept unknown packets */
+       /* Promiscuous mode - Accept unknown packets */
 
        if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
                /* Entry exist - update port only */
@@ -3402,7 +3402,7 @@ static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool
        for (i = 0; i < bm_pool->buf_num; i++) {
                u32 vaddr;
 
-               /* Get buffer virtual adress (indirect access) */
+               /* Get buffer virtual address (indirect access) */
                mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
                vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
                if (!vaddr)
index 3e9c70f15b4258f56ac61fd8999d7d8edfe101c2..c82217e0d22d557d6ae3da0907e3b2cd5a08cf36 100644 (file)
@@ -1,7 +1,8 @@
 obj-$(CONFIG_MLX4_CORE)                += mlx4_core.o
 
-mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-               mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
+mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
+               main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
+               srq.o resource_tracker.o
 
 obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
 
index 3350721bf515ea1d3c48df1592cddbf837c47946..f0fbb4ade85db9b3db5ce049eff76cd978d08353 100644 (file)
@@ -48,6 +48,7 @@
 
 #include "mlx4.h"
 #include "fw.h"
+#include "fw_qos.h"
 
 #define CMD_POLL_TOKEN 0xffff
 #define INBOX_MASK     0xffffffffffffff00ULL
@@ -724,8 +725,10 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                 * on the host, we deprecate the error message for this
                 * specific command/input_mod/opcode_mod/fw-status to be debug.
                 */
-               if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
-                   op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
+               if (op == MLX4_CMD_SET_PORT &&
+                   (in_modifier == 1 || in_modifier == 2) &&
+                   op_modifier == MLX4_SET_PORT_IB_OPCODE &&
+                   context->fw_status == CMD_STAT_BAD_SIZE)
                        mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
                                 op, context->fw_status);
                else
@@ -1454,6 +1457,24 @@ static struct mlx4_cmd_info cmd_info[] = {
                .verify = NULL,
                .wrapper = mlx4_CMD_EPERM_wrapper,
        },
+       {
+               .opcode = MLX4_CMD_ALLOCATE_VPP,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CMD_EPERM_wrapper,
+       },
+       {
+               .opcode = MLX4_CMD_SET_VPORT_QOS,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CMD_EPERM_wrapper,
+       },
        {
                .opcode = MLX4_CMD_CONF_SPECIAL_QP,
                .has_inbox = false,
@@ -1499,6 +1520,15 @@ static struct mlx4_cmd_info cmd_info[] = {
                .verify = NULL,
                .wrapper = mlx4_ACCESS_REG_wrapper,
        },
+       {
+               .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CMD_EPERM_wrapper,
+       },
        /* Native multicast commands are not available for guests */
        {
                .opcode = MLX4_CMD_QP_ATTACH,
@@ -1781,7 +1811,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 
        if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
            vp_oper->state.default_qos == vp_admin->default_qos &&
-           vp_oper->state.link_state == vp_admin->link_state)
+           vp_oper->state.link_state == vp_admin->link_state &&
+           vp_oper->state.qos_vport == vp_admin->qos_vport)
                return 0;
 
        if (!(priv->mfunc.master.slave_state[slave].active &&
@@ -1839,6 +1870,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
        vp_oper->state.default_vlan = vp_admin->default_vlan;
        vp_oper->state.default_qos = vp_admin->default_qos;
        vp_oper->state.link_state = vp_admin->link_state;
+       vp_oper->state.qos_vport = vp_admin->qos_vport;
 
        if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
@@ -1847,6 +1879,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
        work->port = port;
        work->slave = slave;
        work->qos = vp_oper->state.default_qos;
+       work->qos_vport = vp_oper->state.qos_vport;
        work->vlan_id = vp_oper->state.default_vlan;
        work->vlan_ix = vp_oper->vlan_idx;
        work->priv = priv;
@@ -1856,6 +1889,63 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
        return 0;
 }
 
+static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
+{
+       struct mlx4_qos_manager *port_qos_ctl;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
+       bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
+
+       /* Enable only default prio at PF init routine */
+       set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
+}
+
+static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
+{
+       int i;
+       int err;
+       int num_vfs;
+       u16 availible_vpp;
+       u8 vpp_param[MLX4_NUM_UP];
+       struct mlx4_qos_manager *port_qos;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
+       if (err) {
+               mlx4_info(dev, "Failed query availible VPPs\n");
+               return;
+       }
+
+       port_qos = &priv->mfunc.master.qos_ctl[port];
+       num_vfs = (availible_vpp /
+                  bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
+
+       for (i = 0; i < MLX4_NUM_UP; i++) {
+               if (test_bit(i, port_qos->priority_bm))
+                       vpp_param[i] = num_vfs;
+       }
+
+       err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
+       if (err) {
+               mlx4_info(dev, "Failed allocating VPPs\n");
+               return;
+       }
+
+       /* Query actual allocated VPP, just to make sure */
+       err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
+       if (err) {
+               mlx4_info(dev, "Failed query availible VPPs\n");
+               return;
+       }
+
+       port_qos->num_of_qos_vfs = num_vfs;
+       mlx4_dbg(dev, "Port %d Availible VPPs %d\n", port, availible_vpp);
+
+       for (i = 0; i < MLX4_NUM_UP; i++)
+               mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
+                        vpp_param[i]);
+}
 
 static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
 {
@@ -2203,6 +2293,9 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
        }
 
        if (mlx4_is_master(dev)) {
+               struct mlx4_vf_oper_state *vf_oper;
+               struct mlx4_vf_admin_state *vf_admin;
+
                priv->mfunc.master.slave_state =
                        kzalloc(dev->num_slaves *
                                sizeof(struct mlx4_slave_state), GFP_KERNEL);
@@ -2222,6 +2315,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                        goto err_comm_oper;
 
                for (i = 0; i < dev->num_slaves; ++i) {
+                       vf_admin = &priv->mfunc.master.vf_admin[i];
+                       vf_oper = &priv->mfunc.master.vf_oper[i];
                        s_state = &priv->mfunc.master.slave_state[i];
                        s_state->last_cmd = MLX4_COMM_CMD_RESET;
                        mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
@@ -2233,6 +2328,9 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                                     &priv->mfunc.comm[i].slave_read);
                        mmiowb();
                        for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+                               struct mlx4_vport_state *admin_vport;
+                               struct mlx4_vport_state *oper_vport;
+
                                s_state->vlan_filter[port] =
                                        kzalloc(sizeof(struct mlx4_vlan_fltr),
                                                GFP_KERNEL);
@@ -2241,15 +2339,30 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                                                kfree(s_state->vlan_filter[port]);
                                        goto err_slaves;
                                }
+
+                               admin_vport = &vf_admin->vport[port];
+                               oper_vport = &vf_oper->vport[port].state;
                                INIT_LIST_HEAD(&s_state->mcast_filters[port]);
-                               priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
-                               priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
-                               priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
-                               priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
+                               admin_vport->default_vlan = MLX4_VGT;
+                               oper_vport->default_vlan = MLX4_VGT;
+                               admin_vport->qos_vport =
+                                               MLX4_VPP_DEFAULT_VPORT;
+                               oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
+                               vf_oper->vport[port].vlan_idx = NO_INDX;
+                               vf_oper->vport[port].mac_idx = NO_INDX;
                        }
                        spin_lock_init(&s_state->lock);
                }
 
+               if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
+                       for (port = 1; port <= dev->caps.num_ports; port++) {
+                               if (mlx4_is_eth(dev, port)) {
+                                       mlx4_set_default_port_qos(dev, port);
+                                       mlx4_allocate_port_vpps(dev, port);
+                               }
+                       }
+               }
+
                memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
                priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
                INIT_WORK(&priv->mfunc.master.comm_work,
@@ -2670,6 +2783,103 @@ static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
        return port;
 }
 
+/* mlx4_set_vport_qos - program per-priority VPP rate limiting for a VF.
+ * @priv:        master device private data
+ * @slave:       slave (VF) index whose vport QoS is being set
+ * @port:        physical port number
+ * @max_tx_rate: requested rate limit; 0 means no rate limit
+ *
+ * Default QoS values are first read back from vport 0; for every
+ * priority supported by the port (priority_bm), a non-zero
+ * @max_tx_rate is applied as max_avg_bw with enable set.
+ * Returns 0 on success, -EINVAL when the slave exceeds the port's
+ * QoS VF resources, or the firmware command error code.
+ */
+static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
+                             int max_tx_rate)
+{
+       int i;
+       int err;
+       struct mlx4_qos_manager *port_qos;
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
+
+       port_qos = &priv->mfunc.master.qos_ctl[port];
+       memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
+
+       if (slave > port_qos->num_of_qos_vfs) {
+               mlx4_info(dev, "No available VPP resources for this VF\n");
+               return -EINVAL;
+       }
+
+       /* Query for default QoS values from Vport 0 is needed */
+       err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
+       if (err) {
+               mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
+               return err;
+       }
+
+       for (i = 0; i < MLX4_NUM_UP; i++) {
+               if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
+                       vpp_qos[i].max_avg_bw = max_tx_rate;
+                       vpp_qos[i].enable = 1;
+               } else {
+                       /* if user supplied tx_rate == 0, meaning no rate limit
+                        * configuration is required. so we are leaving the
+                        * value of max_avg_bw as queried from Vport 0.
+                        */
+                       vpp_qos[i].enable = 0;
+               }
+       }
+
+       err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
+       if (err) {
+               mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
+               return err;
+       }
+
+       return 0;
+}
+
+/* Return true when the VF is in VST mode (default_vlan != MLX4_VGT) and
+ * its configured default QoS priority is set in the port's QoS priority
+ * bitmap, i.e. VPP rate limiting can be applied to this VF.
+ */
+static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
+                                       struct mlx4_vport_state *vf_admin)
+{
+       struct mlx4_qos_manager *info;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       /* Per-VPP QoS only applies on the master with the QOS_VPP cap set */
+       if (!mlx4_is_master(dev) ||
+           !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
+               return false;
+
+       info = &priv->mfunc.master.qos_ctl[port];
+
+       if (vf_admin->default_vlan != MLX4_VGT &&
+           test_bit(vf_admin->default_qos, info->priority_bm))
+               return true;
+
+       return false;
+}
+
+/* Check whether a requested (vlan, qos) VST/VGT state change is allowed
+ * for a VF that may still have a TX rate limit configured.  Returns true
+ * when the change is permitted, false (with informational log messages)
+ * otherwise.
+ */
+static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
+                                      struct mlx4_vport_state *vf_admin,
+                                      int vlan, int qos)
+{
+       struct mlx4_vport_state dummy_admin = {0};
+
+       /* Nothing to protect: no rate limit set, or the VF is not in a
+        * VST-with-supported-priority state to begin with.
+        */
+       if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
+           !vf_admin->tx_rate)
+               return true;
+
+       dummy_admin.default_qos = qos;
+       dummy_admin.default_vlan = vlan;
+
+       /* VF wants to move to other VST state which is valid with current
+        * rate limit. Either different default vlan in VST or other
+        * supported QoS priority. Otherwise we don't allow this change when
+        * the TX rate is still configured.
+        */
+       if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
+               return true;
+
+       mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
+                 (vlan == MLX4_VGT) ? "VGT" : "VST");
+
+       if (vlan != MLX4_VGT)
+               mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
+
+       mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
+
+       return false;
+}
+
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2713,12 +2923,22 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
        port = mlx4_slaves_closest_port(dev, slave, port);
        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
 
+       if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
+               return -EPERM;
+
        if ((0 == vlan) && (0 == qos))
                vf_admin->default_vlan = MLX4_VGT;
        else
                vf_admin->default_vlan = vlan;
        vf_admin->default_qos = qos;
 
+       /* If rate was configured prior to VST, we saved the configured rate
+        * in vf_admin->rate and now, if priority supported we enforce the QoS
+        */
+       if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
+           vf_admin->tx_rate)
+               vf_admin->qos_vport = slave;
+
        if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
                mlx4_info(dev,
                          "updating vf %d port %d config will take effect on next VF restart\n",
@@ -2727,6 +2947,69 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 }
 EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
 
+/* mlx4_set_vf_rate - set a TX rate limit for a VF via VPP QoS.
+ * @dev:         mlx4 device (must be master with QOS_VPP capability)
+ * @port:        physical port number
+ * @vf:          VF number as seen by the admin
+ * @min_tx_rate: minimum BW share; must be 0 (not supported)
+ * @max_tx_rate: maximum TX rate; 0 clears the limit
+ *
+ * The rate is always saved in the VF's admin state; it is only enforced
+ * on the vport immediately when the VF is in VST mode with a supported
+ * QoS priority (and activated at once if the slave is active and the
+ * device supports UPDATE_QP).  Returns 0 on success or a negative errno.
+ */
+int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
+                    int max_tx_rate)
+{
+       int err;
+       int slave;
+       struct mlx4_vport_state *vf_admin;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       if (!mlx4_is_master(dev) ||
+           !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
+               return -EPROTONOSUPPORT;
+
+       if (min_tx_rate) {
+               mlx4_info(dev, "Minimum BW share not supported\n");
+               return -EPROTONOSUPPORT;
+       }
+
+       slave = mlx4_get_slave_indx(dev, vf);
+       if (slave < 0)
+               return -EINVAL;
+
+       port = mlx4_slaves_closest_port(dev, slave, port);
+       vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+
+       err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
+       if (err) {
+               mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
+                         max_tx_rate);
+               return err;
+       }
+
+       vf_admin->tx_rate = max_tx_rate;
+       /* if VF is not in supported mode (VST with supported prio),
+        * we do not change vport configuration for its QPs, but save
+        * the rate, so it will be enforced when it moves to supported
+        * mode next time.
+        */
+       if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
+               mlx4_info(dev,
+                         "rate set for VF %d when not in valid state\n", vf);
+
+               if (vf_admin->default_vlan != MLX4_VGT)
+                       mlx4_info(dev, "VST priority not supported by QoS\n");
+               else
+                       mlx4_info(dev, "VF in VGT mode (needed VST)\n");
+
+               mlx4_info(dev,
+                         "rate %d takes effect when VF moves to valid state\n",
+                         max_tx_rate);
+               return 0;
+       }
+
+       /* If user sets rate 0 assigning default vport for its QPs */
+       vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
+
+       if (priv->mfunc.master.slave_state[slave].active &&
+           dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
+               mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
+
  /* mlx4_get_slave_default_vlan -
  * return true if VST ( default vlan)
  * if VST, will return vlan & qos (if not NULL)
@@ -2800,7 +3083,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
 
        ivf->vlan               = s_info->default_vlan;
        ivf->qos                = s_info->default_qos;
-       ivf->max_tx_rate        = s_info->tx_rate;
+
+       if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
+               ivf->max_tx_rate = s_info->tx_rate;
+       else
+               ivf->max_tx_rate = 0;
+
        ivf->min_tx_rate        = 0;
        ivf->spoofchk           = s_info->spoofchk;
        ivf->linkstate          = s_info->link_state;
index 90b5309cdb5c623ff5f9a1d3d0a90e04f7b8eb57..8a083d73efdbae61e7d71ee364c816b58a16b2f9 100644 (file)
@@ -164,20 +164,19 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
  * Read the timecounter and return the correct value in ns after converting
  * it into a struct timespec.
  **/
-static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp,
+                              struct timespec64 *ts)
 {
        struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
                                                ptp_clock_info);
        unsigned long flags;
-       u32 remainder;
        u64 ns;
 
+       /* clock read is serialized against writers by clock_lock */
        write_lock_irqsave(&mdev->clock_lock, flags);
        ns = timecounter_read(&mdev->clock);
        write_unlock_irqrestore(&mdev->clock_lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
-       ts->tv_nsec = remainder;
+       /* ns_to_timespec64() replaces the open-coded div_u64_rem() split */
+       *ts = ns_to_timespec64(ns);
 
        return 0;
 }
@@ -191,11 +190,11 @@ static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
  * wall timer value.
  **/
 static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
-                              const struct timespec *ts)
+                              const struct timespec64 *ts)
 {
        struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
                                                ptp_clock_info);
-       u64 ns = timespec_to_ns(ts);
+       u64 ns = timespec64_to_ns(ts);
        unsigned long flags;
 
        /* reset the timecounter */
@@ -232,8 +231,8 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
        .pps            = 0,
        .adjfreq        = mlx4_en_phc_adjfreq,
        .adjtime        = mlx4_en_phc_adjtime,
-       .gettime        = mlx4_en_phc_gettime,
-       .settime        = mlx4_en_phc_settime,
+       .gettime64      = mlx4_en_phc_gettime,
+       .settime64      = mlx4_en_phc_settime,
        .enable         = mlx4_en_phc_enable,
 };
 
index c95ca252187c333719fe4141225cea020909c723..f01918c63f2816fecf07141a29c8c140b29e8629 100644 (file)
 #include <linux/math64.h>
 
 #include "mlx4_en.h"
+#include "fw_qos.h"
+
+/* Definitions for QCN
+ */
+
+/* Big-endian firmware mailbox layout for 802.1Qau (QCN) reaction point
+ * parameters, exchanged via the CONGESTION_CTRL command.  Field order
+ * and the reserved padding must match the device exactly.
+ */
+struct mlx4_congestion_control_mb_prio_802_1_qau_params {
+       __be32 modify_enable_high;
+       __be32 modify_enable_low;
+       __be32 reserved1;
+       __be32 extended_enable;
+       __be32 rppp_max_rps;
+       __be32 rpg_time_reset;
+       __be32 rpg_byte_reset;
+       __be32 rpg_threshold;
+       __be32 rpg_max_rate;
+       __be32 rpg_ai_rate;
+       __be32 rpg_hai_rate;
+       __be32 rpg_gd;
+       __be32 rpg_min_dec_fac;
+       __be32 rpg_min_rate;
+       __be32 max_time_rise;
+       __be32 max_byte_rise;
+       __be32 max_qdelta;
+       __be32 min_qoffset;
+       __be32 gd_coefficient;
+       __be32 reserved2[5];
+       __be32 cp_sample_base;
+       __be32 reserved3[39];
+};
+
+/* Big-endian firmware mailbox layout for 802.1Qau (QCN) statistics,
+ * read via the CONGESTION_CTRL GET_STATISTICS command.
+ */
+struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
+       __be64 rppp_rp_centiseconds;
+       __be32 reserved1;
+       __be32 ignored_cnm;
+       __be32 rppp_created_rps;
+       __be32 estimated_total_rate;
+       __be32 max_active_rate_limiter_index;
+       __be32 dropped_cnms_busy_fw;
+       __be32 reserved2;
+       __be32 cnms_handled_successfully;
+       __be32 min_total_limiters_rate;
+       __be32 max_total_limiters_rate;
+       __be32 reserved3[4];
+};
 
 static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
                                   struct ieee_ets *ets)
@@ -183,6 +227,10 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
                                    prof->rx_ppp);
        if (err)
                en_err(priv, "Failed setting pause params\n");
+       else
+               mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+                                               prof->rx_ppp, prof->rx_pause,
+                                               prof->tx_ppp, prof->tx_pause);
 
        return err;
 }
@@ -242,6 +290,178 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
        return 0;
 }
 
+#define RPG_ENABLE_BIT 31
+#define CN_TAG_BIT     30
+
+/* dcbnl ieee_getqcn callback: read the per-TC 802.1Qau reaction point
+ * parameters from firmware into @qcn.  Issues one CONGESTION_CTRL
+ * GET_PARAMS mailbox command per traffic class.  Returns 0 on success,
+ * -EOPNOTSUPP without the QCN capability, -ENOMEM or a command error.
+ */
+static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
+                                    struct ieee_qcn *qcn)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
+       struct mlx4_cmd_mailbox *mailbox_out = NULL;
+       u64 mailbox_in_dma = 0;
+       u32 inmod = 0;
+       int i, err;
+
+       if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+               return -EOPNOTSUPP;
+
+       mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+       if (IS_ERR(mailbox_out))
+               return -ENOMEM;
+       hw_qcn =
+       (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
+       mailbox_out->buf;
+
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               /* inmod encodes: port | TC bit (byte 1) | algorithm (byte 2) */
+               inmod = priv->port | ((1 << i) << 8) |
+                        (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+               err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
+                                  mailbox_out->dma,
+                                  inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
+                                  MLX4_CMD_CONGESTION_CTRL_OPCODE,
+                                  MLX4_CMD_TIME_CLASS_C,
+                                  MLX4_CMD_NATIVE);
+               if (err) {
+                       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+                       return err;
+               }
+
+               /* convert each big-endian mailbox field to host order */
+               qcn->rpg_enable[i] =
+                       be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
+               qcn->rppp_max_rps[i] =
+                       be32_to_cpu(hw_qcn->rppp_max_rps);
+               qcn->rpg_time_reset[i] =
+                       be32_to_cpu(hw_qcn->rpg_time_reset);
+               qcn->rpg_byte_reset[i] =
+                       be32_to_cpu(hw_qcn->rpg_byte_reset);
+               qcn->rpg_threshold[i] =
+                       be32_to_cpu(hw_qcn->rpg_threshold);
+               qcn->rpg_max_rate[i] =
+                       be32_to_cpu(hw_qcn->rpg_max_rate);
+               qcn->rpg_ai_rate[i] =
+                       be32_to_cpu(hw_qcn->rpg_ai_rate);
+               qcn->rpg_hai_rate[i] =
+                       be32_to_cpu(hw_qcn->rpg_hai_rate);
+               qcn->rpg_gd[i] =
+                       be32_to_cpu(hw_qcn->rpg_gd);
+               qcn->rpg_min_dec_fac[i] =
+                       be32_to_cpu(hw_qcn->rpg_min_dec_fac);
+               qcn->rpg_min_rate[i] =
+                       be32_to_cpu(hw_qcn->rpg_min_rate);
+               /* CNDD state is tracked in the driver, not the firmware */
+               qcn->cndd_state_machine[i] =
+                       priv->cndd_state[i];
+       }
+       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+       return 0;
+}
+
+/* dcbnl ieee_setqcn callback: write the per-TC 802.1Qau reaction point
+ * parameters from @qcn to firmware, one CONGESTION_CTRL SET_PARAMS
+ * mailbox command per traffic class.  Returns 0 on success,
+ * -EOPNOTSUPP without the QCN capability, -ENOMEM or a command error.
+ */
+static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
+                                    struct ieee_qcn *qcn)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
+       struct mlx4_cmd_mailbox *mailbox_in = NULL;
+       u64 mailbox_in_dma = 0;
+       u32 inmod = 0;
+       int i, err;
+#define MODIFY_ENABLE_HIGH_MASK 0xc0000000
+#define MODIFY_ENABLE_LOW_MASK 0xffc00000
+
+       if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+               return -EOPNOTSUPP;
+
+       mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+       if (IS_ERR(mailbox_in))
+               return -ENOMEM;
+
+       mailbox_in_dma = mailbox_in->dma;
+       hw_qcn =
+       (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               /* inmod encodes: port | TC bit (byte 1) | algorithm (byte 2) */
+               inmod = priv->port | ((1 << i) << 8) |
+                        (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+
+               /* Before updating QCN parameter,
+                * need to set its modify enable bit to 1
+                */
+
+               hw_qcn->modify_enable_high = cpu_to_be32(
+                                               MODIFY_ENABLE_HIGH_MASK);
+               hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);
+
+               hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
+               hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
+               hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
+               hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
+               hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
+               hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
+               hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
+               hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
+               hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
+               hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
+               hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
+               /* CNDD state is stored driver-side; INTERIOR_READY also
+                * sets the CN tag bit in the extended enable word.
+                */
+               priv->cndd_state[i] = qcn->cndd_state_machine[i];
+               if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
+                       hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);
+
+               err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
+                              MLX4_CONGESTION_CONTROL_SET_PARAMS,
+                              MLX4_CMD_CONGESTION_CTRL_OPCODE,
+                              MLX4_CMD_TIME_CLASS_C,
+                              MLX4_CMD_NATIVE);
+               if (err) {
+                       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
+                       return err;
+               }
+       }
+       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
+       return 0;
+}
+
+/* dcbnl ieee_getqcnstats callback: read per-TC 802.1Qau statistics from
+ * firmware into @qcn_stats, one CONGESTION_CTRL GET_STATISTICS mailbox
+ * command per traffic class.  Returns 0 on success, -EOPNOTSUPP without
+ * the QCN capability, -ENOMEM or a command error.
+ */
+static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
+                                         struct ieee_qcn_stats *qcn_stats)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
+       struct mlx4_cmd_mailbox *mailbox_out = NULL;
+       u64 mailbox_in_dma = 0;
+       u32 inmod = 0;
+       int i, err;
+
+       if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+               return -EOPNOTSUPP;
+
+       mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+       if (IS_ERR(mailbox_out))
+               return -ENOMEM;
+
+       hw_qcn_stats =
+       (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
+       mailbox_out->buf;
+
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               /* inmod encodes: port | TC bit (byte 1) | algorithm (byte 2) */
+               inmod = priv->port | ((1 << i) << 8) |
+                        (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+               err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
+                                  mailbox_out->dma, inmod,
+                                  MLX4_CONGESTION_CONTROL_GET_STATISTICS,
+                                  MLX4_CMD_CONGESTION_CTRL_OPCODE,
+                                  MLX4_CMD_TIME_CLASS_C,
+                                  MLX4_CMD_NATIVE);
+               if (err) {
+                       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+                       return err;
+               }
+               /* only these two counters are exported to dcbnl */
+               qcn_stats->rppp_rp_centiseconds[i] =
+                       be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
+               qcn_stats->rppp_created_rps[i] =
+                       be32_to_cpu(hw_qcn_stats->rppp_created_rps);
+       }
+       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+       return 0;
+}
+
 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
        .ieee_getets    = mlx4_en_dcbnl_ieee_getets,
        .ieee_setets    = mlx4_en_dcbnl_ieee_setets,
@@ -252,6 +472,9 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
 
        .getdcbx        = mlx4_en_dcbnl_getdcbx,
        .setdcbx        = mlx4_en_dcbnl_setdcbx,
+       .ieee_getqcn    = mlx4_en_dcbnl_ieee_getqcn,
+       .ieee_setqcn    = mlx4_en_dcbnl_ieee_setqcn,
+       .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
 };
 
 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
index a7b58ba8492b5c2540111ab2622cb8819f1f21cb..3f44e2bbb9824caad9068e7ce6f03e1a2df382f2 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/mlx4/device.h>
 #include <linux/in.h>
 #include <net/ip.h>
+#include <linux/bitmap.h>
 
 #include "mlx4_en.h"
 #include "en_port.h"
@@ -104,6 +105,7 @@ static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
 };
 
 static const char main_strings[][ETH_GSTRING_LEN] = {
+       /* main statistics */
        "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
        "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
        "rx_length_errors", "rx_over_errors", "rx_crc_errors",
@@ -117,14 +119,76 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
        "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
        "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
 
+       /* priority flow control statistics rx */
+       "rx_pause_prio_0", "rx_pause_duration_prio_0",
+       "rx_pause_transition_prio_0",
+       "rx_pause_prio_1", "rx_pause_duration_prio_1",
+       "rx_pause_transition_prio_1",
+       "rx_pause_prio_2", "rx_pause_duration_prio_2",
+       "rx_pause_transition_prio_2",
+       "rx_pause_prio_3", "rx_pause_duration_prio_3",
+       "rx_pause_transition_prio_3",
+       "rx_pause_prio_4", "rx_pause_duration_prio_4",
+       "rx_pause_transition_prio_4",
+       "rx_pause_prio_5", "rx_pause_duration_prio_5",
+       "rx_pause_transition_prio_5",
+       "rx_pause_prio_6", "rx_pause_duration_prio_6",
+       "rx_pause_transition_prio_6",
+       "rx_pause_prio_7", "rx_pause_duration_prio_7",
+       "rx_pause_transition_prio_7",
+
+       /* flow control statistics rx */
+       "rx_pause", "rx_pause_duration", "rx_pause_transition",
+
+       /* priority flow control statistics tx */
+       "tx_pause_prio_0", "tx_pause_duration_prio_0",
+       "tx_pause_transition_prio_0",
+       "tx_pause_prio_1", "tx_pause_duration_prio_1",
+       "tx_pause_transition_prio_1",
+       "tx_pause_prio_2", "tx_pause_duration_prio_2",
+       "tx_pause_transition_prio_2",
+       "tx_pause_prio_3", "tx_pause_duration_prio_3",
+       "tx_pause_transition_prio_3",
+       "tx_pause_prio_4", "tx_pause_duration_prio_4",
+       "tx_pause_transition_prio_4",
+       "tx_pause_prio_5", "tx_pause_duration_prio_5",
+       "tx_pause_transition_prio_5",
+       "tx_pause_prio_6", "tx_pause_duration_prio_6",
+       "tx_pause_transition_prio_6",
+       "tx_pause_prio_7", "tx_pause_duration_prio_7",
+       "tx_pause_transition_prio_7",
+
+       /* flow control statistics tx */
+       "tx_pause", "tx_pause_duration", "tx_pause_transition",
+
        /* packet statistics */
-       "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
-       "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
-       "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
-       "tx_prio_6", "tx_prio_7",
+       "rx_multicast_packets",
+       "rx_broadcast_packets",
+       "rx_jabbers",
+       "rx_in_range_length_error",
+       "rx_out_range_length_error",
+       "tx_multicast_packets",
+       "tx_broadcast_packets",
+       "rx_prio_0_packets", "rx_prio_0_bytes",
+       "rx_prio_1_packets", "rx_prio_1_bytes",
+       "rx_prio_2_packets", "rx_prio_2_bytes",
+       "rx_prio_3_packets", "rx_prio_3_bytes",
+       "rx_prio_4_packets", "rx_prio_4_bytes",
+       "rx_prio_5_packets", "rx_prio_5_bytes",
+       "rx_prio_6_packets", "rx_prio_6_bytes",
+       "rx_prio_7_packets", "rx_prio_7_bytes",
+       "rx_novlan_packets", "rx_novlan_bytes",
+       "tx_prio_0_packets", "tx_prio_0_bytes",
+       "tx_prio_1_packets", "tx_prio_1_bytes",
+       "tx_prio_2_packets", "tx_prio_2_bytes",
+       "tx_prio_3_packets", "tx_prio_3_bytes",
+       "tx_prio_4_packets", "tx_prio_4_bytes",
+       "tx_prio_5_packets", "tx_prio_5_bytes",
+       "tx_prio_6_packets", "tx_prio_6_bytes",
+       "tx_prio_7_packets", "tx_prio_7_bytes",
+       "tx_novlan_packets", "tx_novlan_bytes",
+
 };
-#define NUM_MAIN_STATS 21
-#define NUM_ALL_STATS  (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
 
 static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
        "Interrupt Test",
@@ -224,14 +288,50 @@ static int mlx4_en_set_wol(struct net_device *netdev,
        return err;
 }
 
+/* Helper for walking the device stats bitmap in step with the stats
+ * arrays: each array slot is reported only if its bit is set, and an
+ * empty bitmap means "report everything".
+ */
+struct bitmap_iterator {
+       unsigned long *stats_bitmap;
+       unsigned int count;
+       unsigned int iterator;
+       bool advance_array; /* false when bitmap is empty: report all stats */
+};
+
+static inline void bitmap_iterator_init(struct bitmap_iterator *h,
+                                       unsigned long *stats_bitmap,
+                                       int count)
+{
+       h->iterator = 0;
+       h->advance_array = !bitmap_empty(stats_bitmap, count);
+       /* with an empty bitmap all @count stats are selected */
+       h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
+               : count;
+       h->stats_bitmap = stats_bitmap;
+}
+
+/* Nonzero if the stat at the current position should be reported */
+static inline int bitmap_iterator_test(struct bitmap_iterator *h)
+{
+       return !h->advance_array ? 1 : test_bit(h->iterator, h->stats_bitmap);
+}
+
+/* Advance the position; returns the pre-increment index */
+static inline int bitmap_iterator_inc(struct bitmap_iterator *h)
+{
+       return h->iterator++;
+}
+
+/* Number of stats this iterator will select in total */
+static inline unsigned int
+bitmap_iterator_count(struct bitmap_iterator *h)
+{
+       return h->count;
+}
+
 static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
-       int bit_count = hweight64(priv->stats_bitmap);
+       struct bitmap_iterator it;
+
+       bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
 
        switch (sset) {
        case ETH_SS_STATS:
-               return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
+               return bitmap_iterator_count(&it) +
                        (priv->tx_ring_num * 2) +
 #ifdef CONFIG_NET_RX_BUSY_POLL
                        (priv->rx_ring_num * 5);
@@ -253,34 +353,45 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int index = 0;
-       int i, j = 0;
+       int i;
+       struct bitmap_iterator it;
+
+       bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
 
        spin_lock_bh(&priv->stats_lock);
 
-       if (!(priv->stats_bitmap)) {
-               for (i = 0; i < NUM_MAIN_STATS; i++)
-                       data[index++] =
-                               ((unsigned long *) &priv->stats)[i];
-               for (i = 0; i < NUM_PORT_STATS; i++)
+       for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
+               if (bitmap_iterator_test(&it))
+                       data[index++] = ((unsigned long *)&priv->stats)[i];
+
+       for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
+               if (bitmap_iterator_test(&it))
+                       data[index++] = ((unsigned long *)&priv->port_stats)[i];
+
+       for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
+            i++, bitmap_iterator_inc(&it))
+               if (bitmap_iterator_test(&it))
                        data[index++] =
-                               ((unsigned long *) &priv->port_stats)[i];
-               for (i = 0; i < NUM_PKT_STATS; i++)
+                               ((u64 *)&priv->rx_priority_flowstats)[i];
+
+       for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it))
+               if (bitmap_iterator_test(&it))
+                       data[index++] = ((u64 *)&priv->rx_flowstats)[i];
+
+       for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX;
+            i++, bitmap_iterator_inc(&it))
+               if (bitmap_iterator_test(&it))
                        data[index++] =
-                               ((unsigned long *) &priv->pkstats)[i];
-       } else {
-               for (i = 0; i < NUM_MAIN_STATS; i++) {
-                       if ((priv->stats_bitmap >> j) & 1)
-                               data[index++] =
-                               ((unsigned long *) &priv->stats)[i];
-                       j++;
-               }
-               for (i = 0; i < NUM_PORT_STATS; i++) {
-                       if ((priv->stats_bitmap >> j) & 1)
-                               data[index++] =
-                               ((unsigned long *) &priv->port_stats)[i];
-                       j++;
-               }
-       }
+                               ((u64 *)&priv->tx_priority_flowstats)[i];
+
+       for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it))
+               if (bitmap_iterator_test(&it))
+                       data[index++] = ((u64 *)&priv->tx_flowstats)[i];
+
+       for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it))
+               if (bitmap_iterator_test(&it))
+                       data[index++] = ((unsigned long *)&priv->pkstats)[i];
+
        for (i = 0; i < priv->tx_ring_num; i++) {
                data[index++] = priv->tx_ring[i]->packets;
                data[index++] = priv->tx_ring[i]->bytes;
@@ -309,7 +420,10 @@ static void mlx4_en_get_strings(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int index = 0;
-       int i;
+       int i, strings = 0;
+       struct bitmap_iterator it;
+
+       bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
 
        switch (stringset) {
        case ETH_SS_TEST:
@@ -322,29 +436,30 @@ static void mlx4_en_get_strings(struct net_device *dev,
 
        case ETH_SS_STATS:
                /* Add main counters */
-               if (!priv->stats_bitmap) {
-                       for (i = 0; i < NUM_MAIN_STATS; i++)
+               for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
+                    bitmap_iterator_inc(&it))
+                       if (bitmap_iterator_test(&it))
+                               strcpy(data + (index++) * ETH_GSTRING_LEN,
+                                      main_strings[strings]);
+
+               for (i = 0; i < NUM_PORT_STATS; i++, strings++,
+                    bitmap_iterator_inc(&it))
+                       if (bitmap_iterator_test(&it))
                                strcpy(data + (index++) * ETH_GSTRING_LEN,
-                                       main_strings[i]);
-                       for (i = 0; i < NUM_PORT_STATS; i++)
+                                      main_strings[strings]);
+
+               for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
+                    bitmap_iterator_inc(&it))
+                       if (bitmap_iterator_test(&it))
                                strcpy(data + (index++) * ETH_GSTRING_LEN,
-                                       main_strings[i +
-                                       NUM_MAIN_STATS]);
-                       for (i = 0; i < NUM_PKT_STATS; i++)
+                                      main_strings[strings]);
+
+               for (i = 0; i < NUM_PKT_STATS; i++, strings++,
+                    bitmap_iterator_inc(&it))
+                       if (bitmap_iterator_test(&it))
                                strcpy(data + (index++) * ETH_GSTRING_LEN,
-                                       main_strings[i +
-                                       NUM_MAIN_STATS +
-                                       NUM_PORT_STATS]);
-               } else
-                       for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
-                               if ((priv->stats_bitmap >> i) & 1) {
-                                       strcpy(data +
-                                              (index++) * ETH_GSTRING_LEN,
-                                              main_strings[i]);
-                               }
-                               if (!(priv->stats_bitmap >> i))
-                                       break;
-                       }
+                                      main_strings[strings]);
+
                for (i = 0; i < priv->tx_ring_num; i++) {
                        sprintf(data + (index++) * ETH_GSTRING_LEN,
                                "tx%d_packets", i);
@@ -885,6 +1000,12 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
                                    priv->prof->rx_ppp);
        if (err)
                en_err(priv, "Failed setting pause params\n");
+       else
+               mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+                                               priv->prof->rx_ppp,
+                                               priv->prof->rx_pause,
+                                               priv->prof->tx_ppp,
+                                               priv->prof->tx_pause);
 
        return err;
 }
@@ -1818,6 +1939,32 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
        return 0;
 }
 
+static int mlx4_en_set_phys_id(struct net_device *dev,
+                              enum ethtool_phys_id_state state)
+{
+       int err;
+       u16 beacon_duration;
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+
+       if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
+               return -EOPNOTSUPP;
+
+       switch (state) {
+       case ETHTOOL_ID_ACTIVE:
+               beacon_duration = PORT_BEACON_MAX_LIMIT;
+               break;
+       case ETHTOOL_ID_INACTIVE:
+               beacon_duration = 0;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
+       return err;
+}
+
 const struct ethtool_ops mlx4_en_ethtool_ops = {
        .get_drvinfo = mlx4_en_get_drvinfo,
        .get_settings = mlx4_en_get_settings,
@@ -1827,6 +1974,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
        .get_sset_count = mlx4_en_get_sset_count,
        .get_ethtool_stats = mlx4_en_get_ethtool_stats,
        .self_test = mlx4_en_self_test,
+       .set_phys_id = mlx4_en_set_phys_id,
        .get_wol = mlx4_en_get_wol,
        .set_wol = mlx4_en_set_wol,
        .get_msglevel = mlx4_en_get_msglevel,
index 58d5a07d0ff4da6397118fb4b3b051ac936ed95b..913b716ed2e141189a978af29ab5d54c1b606387 100644 (file)
@@ -103,6 +103,11 @@ void mlx4_en_update_loopback_state(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
+       if (features & NETIF_F_LOOPBACK)
+               priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+       else
+               priv->ctrl_flags &= cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+
        priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
                        MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
 
index 3485acf03014c7e4df64fa94a770fce00abe5a35..0f1afc085d580b34e0eda1eaa4b1cdc1737c71be 100644 (file)
@@ -1685,7 +1685,7 @@ int mlx4_en_start_port(struct net_device *dev)
        }
 
        /* Attach rx QP to bradcast address */
-       memset(&mc_list[10], 0xff, ETH_ALEN);
+       eth_broadcast_addr(&mc_list[10]);
        mc_list[5] = priv->port; /* needed for B0 steering support */
        if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
                                  priv->port, 0, MLX4_PROT_ETH,
@@ -1786,7 +1786,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
        }
 
        /* Detach All multicasts */
-       memset(&mc_list[10], 0xff, ETH_ALEN);
+       eth_broadcast_addr(&mc_list[10]);
        mc_list[5] = priv->port; /* needed for B0 steering support */
        mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
                              MLX4_PROT_ETH, priv->broadcast_id);
@@ -1888,6 +1888,12 @@ static void mlx4_en_clear_stats(struct net_device *dev)
        memset(&priv->pstats, 0, sizeof(priv->pstats));
        memset(&priv->pkstats, 0, sizeof(priv->pkstats));
        memset(&priv->port_stats, 0, sizeof(priv->port_stats));
+       memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
+       memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
+       memset(&priv->rx_priority_flowstats, 0,
+              sizeof(priv->rx_priority_flowstats));
+       memset(&priv->tx_priority_flowstats, 0,
+              sizeof(priv->tx_priority_flowstats));
 
        for (i = 0; i < priv->tx_ring_num; i++) {
                priv->tx_ring[i]->bytes = 0;
@@ -2189,31 +2195,50 @@ static int mlx4_en_set_features(struct net_device *netdev,
                netdev_features_t features)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
+       bool reset = false;
        int ret = 0;
 
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
+               en_info(priv, "Turn %s RX-FCS\n",
+                       (features & NETIF_F_RXFCS) ? "ON" : "OFF");
+               reset = true;
+       }
+
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
+               u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
+
+               en_info(priv, "Turn %s RX-ALL\n",
+                       ignore_fcs_value ? "ON" : "OFF");
+               ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
+                                             priv->port, ignore_fcs_value);
+               if (ret)
+                       return ret;
+       }
+
        if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
                en_info(priv, "Turn %s RX vlan strip offload\n",
                        (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
-               ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
-                                          features);
-               if (ret)
-                       return ret;
+               reset = true;
        }
 
        if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
                en_info(priv, "Turn %s TX vlan strip offload\n",
                        (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
 
-       if (features & NETIF_F_LOOPBACK)
-               priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
-       else
-               priv->ctrl_flags &=
-                       cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
+               en_info(priv, "Turn %s loopback\n",
+                       (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
+               mlx4_en_update_loopback_state(netdev, features);
+       }
 
-       mlx4_en_update_loopback_state(netdev, features);
+       if (reset) {
+               ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
+                                          features);
+               if (ret)
+                       return ret;
+       }
 
        return 0;
-
 }
 
 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
@@ -2236,6 +2261,16 @@ static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
        return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
 }
 
+static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+                              int max_tx_rate)
+{
+       struct mlx4_en_priv *en_priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = en_priv->mdev;
+
+       return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
+                               max_tx_rate);
+}
+
 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
 {
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
@@ -2373,10 +2408,38 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
                                                struct net_device *dev,
                                                netdev_features_t features)
 {
+       features = vlan_features_check(skb, features);
        return vxlan_features_check(skb, features);
 }
 #endif
 
+static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
+       struct mlx4_update_qp_params params;
+       int err;
+
+       if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
+               return -EOPNOTSUPP;
+
+       /* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */
+       if (maxrate >> 12) {
+               params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
+               params.rate_val  = maxrate / 1000;
+       } else if (maxrate) {
+               params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
+               params.rate_val  = maxrate;
+       } else { /* zero serves to revoke the QP rate-limitation */
+               params.rate_unit = 0;
+               params.rate_val  = 0;
+       }
+
+       err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
+                            &params);
+       return err;
+}
+
 static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_open               = mlx4_en_open,
        .ndo_stop               = mlx4_en_close,
@@ -2408,6 +2471,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
        .ndo_features_check     = mlx4_en_features_check,
 #endif
+       .ndo_set_tx_maxrate     = mlx4_en_set_tx_maxrate,
 };
 
 static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2425,6 +2489,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
        .ndo_set_vf_mac         = mlx4_en_set_vf_mac,
        .ndo_set_vf_vlan        = mlx4_en_set_vf_vlan,
+       .ndo_set_vf_rate        = mlx4_en_set_vf_rate,
        .ndo_set_vf_spoofchk    = mlx4_en_set_vf_spoofchk,
        .ndo_set_vf_link_state  = mlx4_en_set_vf_link_state,
        .ndo_get_vf_config      = mlx4_en_get_vf_config,
@@ -2442,6 +2507,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
        .ndo_features_check     = mlx4_en_features_check,
 #endif
+       .ndo_set_tx_maxrate     = mlx4_en_set_tx_maxrate,
 };
 
 struct mlx4_en_bond {
@@ -2618,6 +2684,82 @@ int mlx4_en_netdev_event(struct notifier_block *this,
        return NOTIFY_DONE;
 }
 
+void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
+                                    struct mlx4_en_stats_bitmap *stats_bitmap,
+                                    u8 rx_ppp, u8 rx_pause,
+                                    u8 tx_ppp, u8 tx_pause)
+{
+       int last_i = NUM_MAIN_STATS + NUM_PORT_STATS;
+
+       if (!mlx4_is_slave(dev) &&
+           (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
+               mutex_lock(&stats_bitmap->mutex);
+               bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
+
+               if (rx_ppp)
+                       bitmap_set(stats_bitmap->bitmap, last_i,
+                                  NUM_FLOW_PRIORITY_STATS_RX);
+               last_i += NUM_FLOW_PRIORITY_STATS_RX;
+
+               if (rx_pause && !(rx_ppp))
+                       bitmap_set(stats_bitmap->bitmap, last_i,
+                                  NUM_FLOW_STATS_RX);
+               last_i += NUM_FLOW_STATS_RX;
+
+               if (tx_ppp)
+                       bitmap_set(stats_bitmap->bitmap, last_i,
+                                  NUM_FLOW_PRIORITY_STATS_TX);
+               last_i += NUM_FLOW_PRIORITY_STATS_TX;
+
+               if (tx_pause && !(tx_ppp))
+                       bitmap_set(stats_bitmap->bitmap, last_i,
+                                  NUM_FLOW_STATS_TX);
+               last_i += NUM_FLOW_STATS_TX;
+
+               mutex_unlock(&stats_bitmap->mutex);
+       }
+}
+
+void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
+                             struct mlx4_en_stats_bitmap *stats_bitmap,
+                             u8 rx_ppp, u8 rx_pause,
+                             u8 tx_ppp, u8 tx_pause)
+{
+       int last_i = 0;
+
+       mutex_init(&stats_bitmap->mutex);
+       bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
+
+       if (mlx4_is_slave(dev)) {
+               bitmap_set(stats_bitmap->bitmap, last_i +
+                                        MLX4_FIND_NETDEV_STAT(rx_packets), 1);
+               bitmap_set(stats_bitmap->bitmap, last_i +
+                                        MLX4_FIND_NETDEV_STAT(tx_packets), 1);
+               bitmap_set(stats_bitmap->bitmap, last_i +
+                                        MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
+               bitmap_set(stats_bitmap->bitmap, last_i +
+                                        MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
+               bitmap_set(stats_bitmap->bitmap, last_i +
+                                        MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
+               bitmap_set(stats_bitmap->bitmap, last_i +
+                                        MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
+       } else {
+               bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
+       }
+       last_i += NUM_MAIN_STATS;
+
+       bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
+       last_i += NUM_PORT_STATS;
+
+       mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
+                                       rx_ppp, rx_pause,
+                                       tx_ppp, tx_pause);
+       last_i += NUM_FLOW_STATS;
+
+       if (!mlx4_is_slave(dev))
+               bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
+}
+
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        struct mlx4_en_port_profile *prof)
 {
@@ -2693,7 +2835,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
        if (!mlx4_is_slave(priv->mdev->dev)) {
-               if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
+               if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
                        dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
                } else {
                        en_info(priv, "enabling only PFC DCB ops\n");
@@ -2780,6 +2922,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        dev->hw_features |= NETIF_F_LOOPBACK |
                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
+       if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
+               dev->hw_features |= NETIF_F_RXFCS;
+
+       if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
+               dev->hw_features |= NETIF_F_RXALL;
+
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED &&
            mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
@@ -2844,7 +2992,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                queue_delayed_work(mdev->workqueue, &priv->service_task,
                                   SERVICE_TASK_DELAY);
 
-       mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+       mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+                                mdev->profile.prof[priv->port].rx_ppp,
+                                mdev->profile.prof[priv->port].rx_pause,
+                                mdev->profile.prof[priv->port].tx_ppp,
+                                mdev->profile.prof[priv->port].tx_pause);
 
        err = register_netdev(dev);
        if (err) {
@@ -2872,7 +3024,8 @@ int mlx4_en_reset_config(struct net_device *dev,
 
        if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
            priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
-           !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
+           !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
+           !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
                return 0; /* Nothing to change */
 
        if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
@@ -2911,6 +3064,13 @@ int mlx4_en_reset_config(struct net_device *dev,
                        dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
        }
 
+       if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
+               if (features & NETIF_F_RXFCS)
+                       dev->features |= NETIF_F_RXFCS;
+               else
+                       dev->features &= ~NETIF_F_RXFCS;
+       }
+
        /* RX vlan offload and RX time-stamping can't co-exist !
         * Regardless of the caller's choice,
         * Turn Off RX vlan offload in case of time-stamping is ON
index 6cb80072af6c16b33f832a06a67d7e379ecb2f1f..54f0e5ab2e55ca87dc66a2ef8b4e27062a634ce4 100644 (file)
@@ -128,9 +128,29 @@ out:
        return err;
 }
 
+/* Each counter set is located in struct mlx4_en_stat_out_mbox
+ * with a const offset between its prio components.
+ * This function runs over a counter set and sums all of its prio components.
+ */
+static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
+{
+       __be64 *curr = start;
+       unsigned long ret = 0;
+       int i;
+       int offset = next - start;
+
+       for (i = 0; i <= num; i++) {
+               ret += be64_to_cpu(*curr);
+               curr += offset;
+       }
+
+       return ret;
+}
+
 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 {
        struct mlx4_en_stat_out_mbox *mlx4_en_stats;
+       struct mlx4_en_stat_out_flow_control_mbox *flowstats;
        struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
        struct net_device_stats *stats = &priv->stats;
        struct mlx4_cmd_mailbox *mailbox;
@@ -183,22 +203,25 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
                priv->port_stats.xmit_more         += ring->xmit_more;
        }
 
+       /* net device stats */
        stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
-                          be32_to_cpu(mlx4_en_stats->RdropLength) +
                           be32_to_cpu(mlx4_en_stats->RJBBR) +
                           be32_to_cpu(mlx4_en_stats->RCRC) +
-                          be32_to_cpu(mlx4_en_stats->RRUNT);
-       stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
-       stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_novlan);
+                          be32_to_cpu(mlx4_en_stats->RRUNT) +
+                          be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) +
+                          be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) +
+                          be32_to_cpu(mlx4_en_stats->RSHORT) +
+                          en_stats_adder(&mlx4_en_stats->RGIANT_prio_0,
+                                         &mlx4_en_stats->RGIANT_prio_1,
+                                         NUM_PRIORITIES);
+       stats->tx_errors = en_stats_adder(&mlx4_en_stats->TGIANT_prio_0,
+                                         &mlx4_en_stats->TGIANT_prio_1,
+                                         NUM_PRIORITIES);
+       stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
+                                         &mlx4_en_stats->MCAST_prio_1,
+                                         NUM_PRIORITIES);
        stats->collisions = 0;
+       stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
        stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
        stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
        stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
@@ -210,33 +233,116 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        stats->tx_fifo_errors = 0;
        stats->tx_heartbeat_errors = 0;
        stats->tx_window_errors = 0;
+       stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+
+       /* RX stats */
+       priv->pkstats.rx_multicast_packets = stats->multicast;
+       priv->pkstats.rx_broadcast_packets =
+                       en_stats_adder(&mlx4_en_stats->RBCAST_prio_0,
+                                      &mlx4_en_stats->RBCAST_prio_1,
+                                      NUM_PRIORITIES);
+       priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR);
+       priv->pkstats.rx_in_range_length_error =
+               be64_to_cpu(mlx4_en_stats->RInRangeLengthErr);
+       priv->pkstats.rx_out_range_length_error =
+               be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr);
+
+       /* Tx stats */
+       priv->pkstats.tx_multicast_packets =
+               en_stats_adder(&mlx4_en_stats->TMCAST_prio_0,
+                              &mlx4_en_stats->TMCAST_prio_1,
+                              NUM_PRIORITIES);
+       priv->pkstats.tx_broadcast_packets =
+               en_stats_adder(&mlx4_en_stats->TBCAST_prio_0,
+                              &mlx4_en_stats->TBCAST_prio_1,
+                              NUM_PRIORITIES);
+
+       priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
+       priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0);
+       priv->pkstats.rx_prio[1][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
+       priv->pkstats.rx_prio[1][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_1);
+       priv->pkstats.rx_prio[2][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
+       priv->pkstats.rx_prio[2][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_2);
+       priv->pkstats.rx_prio[3][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
+       priv->pkstats.rx_prio[3][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_3);
+       priv->pkstats.rx_prio[4][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
+       priv->pkstats.rx_prio[4][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_4);
+       priv->pkstats.rx_prio[5][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
+       priv->pkstats.rx_prio[5][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_5);
+       priv->pkstats.rx_prio[6][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
+       priv->pkstats.rx_prio[6][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_6);
+       priv->pkstats.rx_prio[7][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
+       priv->pkstats.rx_prio[7][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_7);
+       priv->pkstats.rx_prio[8][0] = be64_to_cpu(mlx4_en_stats->RTOT_novlan);
+       priv->pkstats.rx_prio[8][1] = be64_to_cpu(mlx4_en_stats->ROCT_novlan);
+       priv->pkstats.tx_prio[0][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
+       priv->pkstats.tx_prio[0][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_0);
+       priv->pkstats.tx_prio[1][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
+       priv->pkstats.tx_prio[1][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_1);
+       priv->pkstats.tx_prio[2][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
+       priv->pkstats.tx_prio[2][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_2);
+       priv->pkstats.tx_prio[3][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
+       priv->pkstats.tx_prio[3][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_3);
+       priv->pkstats.tx_prio[4][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
+       priv->pkstats.tx_prio[4][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_4);
+       priv->pkstats.tx_prio[5][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
+       priv->pkstats.tx_prio[5][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_5);
+       priv->pkstats.tx_prio[6][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
+       priv->pkstats.tx_prio[6][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_6);
+       priv->pkstats.tx_prio[7][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
+       priv->pkstats.tx_prio[7][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_7);
+       priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
+       priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
+
+       spin_unlock_bh(&priv->stats_lock);
+
+       /* 0xffs indicates invalid value */
+       memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+
+       if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
+               memset(mailbox->buf, 0,
+                      sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+               err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
+                                  in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
+                                  0, MLX4_CMD_DUMP_ETH_STATS,
+                                  MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+               if (err)
+                       goto out;
+       }
+
+       flowstats = mailbox->buf;
+
+       spin_lock_bh(&priv->stats_lock);
+
+       for (i = 0; i < MLX4_NUM_PRIORITIES; i++)       {
+               priv->rx_priority_flowstats[i].rx_pause =
+                       be64_to_cpu(flowstats[i].rx_pause);
+               priv->rx_priority_flowstats[i].rx_pause_duration =
+                       be64_to_cpu(flowstats[i].rx_pause_duration);
+               priv->rx_priority_flowstats[i].rx_pause_transition =
+                       be64_to_cpu(flowstats[i].rx_pause_transition);
+               priv->tx_priority_flowstats[i].tx_pause =
+                       be64_to_cpu(flowstats[i].tx_pause);
+               priv->tx_priority_flowstats[i].tx_pause_duration =
+                       be64_to_cpu(flowstats[i].tx_pause_duration);
+               priv->tx_priority_flowstats[i].tx_pause_transition =
+                       be64_to_cpu(flowstats[i].tx_pause_transition);
+       }
+
+       /* if pfc is not in use, all per-priority counters have the same value */
+       priv->rx_flowstats.rx_pause =
+               be64_to_cpu(flowstats[0].rx_pause);
+       priv->rx_flowstats.rx_pause_duration =
+               be64_to_cpu(flowstats[0].rx_pause_duration);
+       priv->rx_flowstats.rx_pause_transition =
+               be64_to_cpu(flowstats[0].rx_pause_transition);
+       priv->tx_flowstats.tx_pause =
+               be64_to_cpu(flowstats[0].tx_pause);
+       priv->tx_flowstats.tx_pause_duration =
+               be64_to_cpu(flowstats[0].tx_pause_duration);
+       priv->tx_flowstats.tx_pause_transition =
+               be64_to_cpu(flowstats[0].tx_pause_transition);
 
-       priv->pkstats.broadcast =
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
-       priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
-       priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
-       priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
-       priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
-       priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
-       priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
-       priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
-       priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
-       priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
-       priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
-       priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
-       priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
-       priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
-       priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
-       priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
-       priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
        spin_unlock_bh(&priv->stats_lock);
 
 out:
index 698d60de1255269c11363c0196fd16800d5c4f13..79b1501e7951fc18a97caf3c3f127a50c6c3e256 100644 (file)
@@ -1116,7 +1116,10 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
        /* Cancel FCS removal if FW allows */
        if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
                context->param3 |= cpu_to_be32(1 << 29);
-               ring->fcs_del = ETH_FCS_LEN;
+               if (priv->dev->features & NETIF_F_RXFCS)
+                       ring->fcs_del = 0;
+               else
+                       ring->fcs_del = ETH_FCS_LEN;
        } else
                ring->fcs_del = 0;
 
index a61009f4b2df728e05a4e54def4bb1336949f03e..b66e03d9711f945fe06827ed480258a78ce26833 100644 (file)
@@ -66,7 +66,7 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
        ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
        packet  = (unsigned char *)skb_put(skb, packet_size);
        memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
-       memset(ethh->h_source, 0, ETH_ALEN);
+       eth_zero_addr(ethh->h_source);
        ethh->h_proto = htons(ETH_P_ARP);
        skb_set_mac_header(skb, 0);
        for (i = 0; i < packet_size; ++i)       /* fill our packet */
index 5a21e5dc94cbae7f8c35d989aba039afcb5c4f77..b9881fc1252fab863cb184a633113ac7307a4467 100644 (file)
@@ -49,9 +49,9 @@ enum {
 extern void __buggy_use_of_MLX4_GET(void);
 extern void __buggy_use_of_MLX4_PUT(void);
 
-static bool enable_qos;
+static bool enable_qos = true;
 module_param(enable_qos, bool, 0444);
-MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
+MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
 
 #define MLX4_GET(dest, source, offset)                               \
        do {                                                          \
@@ -105,6 +105,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
                [41] = "Unicast VEP steering support",
                [42] = "Multicast VEP steering support",
                [48] = "Counters support",
+               [52] = "RSS IP fragments support",
                [53] = "Port ETS Scheduler support",
                [55] = "Port link type sensing support",
                [59] = "Port management change event support",
@@ -143,7 +144,14 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [18] = "More than 80 VFs support",
                [19] = "Performance optimized for limited rule configuration flow steering support",
                [20] = "Recoverable error events support",
-               [21] = "Port Remap support"
+               [21] = "Port Remap support",
+               [22] = "QCN support",
+               [23] = "QP rate limiting support",
+               [24] = "Ethernet Flow control statistics support",
+               [25] = "Granular QoS per VF support",
+               [26] = "Port ETS Scheduler support",
+               [27] = "Port beacon support",
+               [28] = "RX-ALL support",
        };
        int i;
 
@@ -641,6 +649,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_RSS_OFFSET               0x2e
 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET          0x2f
 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET           0x33
+#define QUERY_DEV_CAP_PORT_BEACON_OFFSET       0x34
 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET         0x35
 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET         0x36
 #define QUERY_DEV_CAP_VL_PORT_OFFSET           0x37
@@ -670,12 +679,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET          0x66
 #define QUERY_DEV_CAP_MAX_XRC_OFFSET           0x67
 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET      0x68
+#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET   0x70
 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET       0x70
 #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET       0x74
 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET    0x76
 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET      0x77
 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE  0x7a
-#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET     0x7a
+#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET       0x7b
 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET   0x80
 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET      0x82
 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET      0x84
@@ -696,6 +706,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET         0xb0
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET   0xa8
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET  0xac
+#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
+#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
+#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
+
 
        dev_cap->flags2 = 0;
        mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -767,16 +781,25 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
        dev_cap->num_ports = field & 0xf;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
+       if (field & 0x10)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
        dev_cap->max_msg_sz = 1 << (field & 0x1f);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
        if (field & 0x80)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
        dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
+       if (field & 0x80)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
        if (field & 0x80)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
        dev_cap->fs_max_num_qp_per_entry = field;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
+       if (field & 0x1)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
        MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
        dev_cap->stat_rate_support = stat_rate;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
@@ -856,6 +879,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
        dev_cap->max_rq_desc_sz = size;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+       if (field & (1 << 4))
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
        if (field & (1 << 5))
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
        if (field & (1 << 6))
@@ -869,6 +894,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
        if (field & 0x20)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
+       if (field & (1 << 2))
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
        MLX4_GET(dev_cap->reserved_lkey, outbox,
                 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
        MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -882,6 +909,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
        if (field & 1<<3)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
+       if (field & (1 << 5))
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
        MLX4_GET(dev_cap->max_icm_sz, outbox,
                 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
        if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -900,6 +929,18 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
        dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;
 
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
+       dev_cap->rl_caps.num_rates = size;
+       if (dev_cap->rl_caps.num_rates) {
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
+               MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
+               dev_cap->rl_caps.max_val  = size & 0xfff;
+               dev_cap->rl_caps.max_unit = size >> 14;
+               MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
+               dev_cap->rl_caps.min_val  = size & 0xfff;
+               dev_cap->rl_caps.min_unit = size >> 14;
+       }
+
        MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
        if (field32 & (1 << 16))
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
@@ -975,6 +1016,15 @@ void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                 dev_cap->dmfs_high_rate_qpn_base);
        mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
                 dev_cap->dmfs_high_rate_qpn_range);
+
+       if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) {
+               struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps;
+
+               mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n",
+                        rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val,
+                        rl_caps->min_unit, rl_caps->min_val);
+       }
+
        dump_dev_cap_flags(dev, dev_cap->flags);
        dump_dev_cap_flags2(dev, dev_cap->flags2);
 }
@@ -1058,6 +1108,7 @@ out:
        return err;
 }
 
+#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS        (1 << 28)
 #define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26)
 #define DEV_CAP_EXT_2_FLAG_80_VFS      (1 << 21)
 #define DEV_CAP_EXT_2_FLAG_FSM         (1 << 20)
@@ -1071,6 +1122,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        u64     flags;
        int     err = 0;
        u8      field;
+       u16     field16;
        u32     bmme_flags, field32;
        int     real_port;
        int     slave_port;
@@ -1101,6 +1153,9 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        }
        for (; slave_port < dev->caps.num_ports; ++slave_port)
                flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
+
+       /* Not exposing RSS IP fragments to guests */
+       flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
        MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
 
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
@@ -1113,11 +1168,16 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        field &= 0x7f;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
 
-       /* For guests, disable vxlan tunneling */
+       /* For guests, disable vxlan tunneling and QoS support */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
-       field &= 0xf7;
+       field &= 0xd7;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
 
+       /* For guests, disable port BEACON */
+       MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
+       field &= 0x7f;
+       MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
+
        /* For guests, report Blueflame disabled */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
        field &= 0x7f;
@@ -1146,9 +1206,28 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        /* turn off host side virt features (VST, FSM, etc) for guests */
        MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
        field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
-                    DEV_CAP_EXT_2_FLAG_FSM);
+                    DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS);
        MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
 
+       /* turn off QCN for guests */
+       MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
+       field &= 0xfe;
+       MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
+
+       /* turn off QP max-rate limiting for guests */
+       field16 = 0;
+       MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
+
+       /* turn off QoS per VF support for guests */
+       MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+       field &= 0xef;
+       MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+
+       /* turn off ignore FCS feature for guests */
+       MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
+       field &= 0xfb;
+       MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
+
        return 0;
 }
 
@@ -1648,13 +1727,17 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
                *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
 
        /* Enable QoS support if module parameter set */
-       if (enable_qos)
+       if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos)
                *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
 
        /* enable counters */
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
                *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
 
+       /* Enable RSS spread to fragmented IP packets when supported */
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG)
+               *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13);
+
        /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
                *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
@@ -1843,6 +1926,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
                else
                        param->steering_mode = MLX4_STEERING_MODE_A0;
        }
+
+       if (dword_field & (1 << 13))
+               param->rss_ip_frags = 1;
+
        /* steering attributes */
        if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
                MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
index f44f7f6017ed589f5c8a184666b82e00f37f47a8..07cb7c2461adaa90cbfab5e478a6a82d14613f87 100644 (file)
@@ -127,6 +127,7 @@ struct mlx4_dev_cap {
        u32 max_counters;
        u32 dmfs_high_rate_qpn_base;
        u32 dmfs_high_rate_qpn_range;
+       struct mlx4_rate_limit_caps rl_caps;
        struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
 };
 
@@ -202,6 +203,7 @@ struct mlx4_init_hca_param {
        u64 dev_cap_enabled;
        u16 cqe_size; /* For use only when CQE stride feature enabled */
        u16 eqe_size; /* For use only when EQE stride feature enabled */
+       u8 rss_ip_frags;
 };
 
 struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.c b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c
new file mode 100644 (file)
index 0000000..8f2fde0
--- /dev/null
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "fw_qos.h"
+#include "fw.h"
+
+enum {
+       /* allocate vpp opcode modifiers */
+       MLX4_ALLOCATE_VPP_ALLOCATE      = 0x0,
+       MLX4_ALLOCATE_VPP_QUERY         = 0x1
+};
+
+enum {
+       /* set vport qos opcode modifiers */
+       MLX4_SET_VPORT_QOS_SET          = 0x0,
+       MLX4_SET_VPORT_QOS_QUERY        = 0x1
+};
+
+struct mlx4_set_port_prio2tc_context {
+       u8 prio2tc[4];
+};
+
+struct mlx4_port_scheduler_tc_cfg_be {
+       __be16 pg;
+       __be16 bw_precentage;
+       __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
+       __be16 max_bw_value;
+};
+
+struct mlx4_set_port_scheduler_context {
+       struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
+};
+
+/* Granular Qos (per VF) section */
+struct mlx4_alloc_vpp_param {
+       __be32 availible_vpp;
+       __be32 vpp_p_up[MLX4_NUM_UP];
+};
+
+struct mlx4_prio_qos_param {
+       __be32 bw_share;
+       __be32 max_avg_bw;
+       __be32 reserved;
+       __be32 enable;
+       __be32 reserved1[4];
+};
+
+struct mlx4_set_vport_context {
+       __be32 reserved[8];
+       struct mlx4_prio_qos_param qos_p_up[MLX4_NUM_UP];
+};
+
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_prio2tc_context *context;
+       int err;
+       u32 in_mod;
+       int i;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       context = mailbox->buf;
+
+       for (i = 0; i < MLX4_NUM_UP; i += 2)
+               context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
+
+       in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
+
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+                           u8 *pg, u16 *ratelimit)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_scheduler_context *context;
+       int err;
+       u32 in_mod;
+       int i;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       context = mailbox->buf;
+
+       for (i = 0; i < MLX4_NUM_TC; i++) {
+               struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
+               u16 r;
+
+               if (ratelimit && ratelimit[i]) {
+                       if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
+                               r = ratelimit[i];
+                               tc->max_bw_units =
+                                       htons(MLX4_RATELIMIT_100M_UNITS);
+                       } else {
+                               r = ratelimit[i] / 10;
+                               tc->max_bw_units =
+                                       htons(MLX4_RATELIMIT_1G_UNITS);
+                       }
+                       tc->max_bw_value = htons(r);
+               } else {
+                       tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
+                       tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
+               }
+
+               tc->pg = htons(pg[i]);
+               tc->bw_precentage = htons(tc_tx_bw[i]);
+       }
+
+       in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+
+int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
+                         u16 *availible_vpp, u8 *vpp_p_up)
+{
+       int i;
+       int err;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_alloc_vpp_param *out_param;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       out_param = mailbox->buf;
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, port,
+                          MLX4_ALLOCATE_VPP_QUERY,
+                          MLX4_CMD_ALLOCATE_VPP,
+                          MLX4_CMD_TIME_CLASS_A,
+                          MLX4_CMD_NATIVE);
+       if (err)
+               goto out;
+
+       /* Total number of supported VPPs */
+       *availible_vpp = (u16)be32_to_cpu(out_param->availible_vpp);
+
+       for (i = 0; i < MLX4_NUM_UP; i++)
+               vpp_p_up[i] = (u8)be32_to_cpu(out_param->vpp_p_up[i]);
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_get);
+
+int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up)
+{
+       int i;
+       int err;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_alloc_vpp_param *in_param;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       in_param = mailbox->buf;
+
+       for (i = 0; i < MLX4_NUM_UP; i++)
+               in_param->vpp_p_up[i] = cpu_to_be32(vpp_p_up[i]);
+
+       err = mlx4_cmd(dev, mailbox->dma, port,
+                      MLX4_ALLOCATE_VPP_ALLOCATE,
+                      MLX4_CMD_ALLOCATE_VPP,
+                      MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_set);
+
+int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
+                          struct mlx4_vport_qos_param *out_param)
+{
+       int i;
+       int err;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_vport_context *ctx;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       ctx = mailbox->buf;
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, (vport << 8) | port,
+                          MLX4_SET_VPORT_QOS_QUERY,
+                          MLX4_CMD_SET_VPORT_QOS,
+                          MLX4_CMD_TIME_CLASS_A,
+                          MLX4_CMD_NATIVE);
+       if (err)
+               goto out;
+
+       for (i = 0; i < MLX4_NUM_UP; i++) {
+               out_param[i].bw_share = be32_to_cpu(ctx->qos_p_up[i].bw_share);
+               out_param[i].max_avg_bw =
+                       be32_to_cpu(ctx->qos_p_up[i].max_avg_bw);
+               out_param[i].enable =
+                       !!(be32_to_cpu(ctx->qos_p_up[i].enable) & 31);
+       }
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_get);
+
+int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
+                          struct mlx4_vport_qos_param *in_param)
+{
+       int i;
+       int err;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_vport_context *ctx;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       ctx = mailbox->buf;
+
+       for (i = 0; i < MLX4_NUM_UP; i++) {
+               ctx->qos_p_up[i].bw_share = cpu_to_be32(in_param[i].bw_share);
+               ctx->qos_p_up[i].max_avg_bw =
+                               cpu_to_be32(in_param[i].max_avg_bw);
+               ctx->qos_p_up[i].enable =
+                               cpu_to_be32(in_param[i].enable << 31);
+       }
+
+       err = mlx4_cmd(dev, mailbox->dma, (vport << 8) | port,
+                      MLX4_SET_VPORT_QOS_SET,
+                      MLX4_CMD_SET_VPORT_QOS,
+                      MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_set);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
new file mode 100644 (file)
index 0000000..ac1f331
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_FW_QOS_H
+#define MLX4_FW_QOS_H
+
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/device.h>
+
+#define MLX4_NUM_UP 8
+#define MLX4_NUM_TC 8
+
+/* Default supported priorities for VPP allocation */
+#define MLX4_DEFAULT_QOS_PRIO (0)
+
+/* Derived from FW feature definition, 0 is the default vport fo all QPs */
+#define MLX4_VPP_DEFAULT_VPORT (0)
+
+struct mlx4_vport_qos_param {
+       u32 bw_share;
+       u32 max_avg_bw;
+       u8 enable;
+};
+
+/**
+ * mlx4_SET_PORT_PRIO2TC - This routine maps user priorities to traffic
+ * classes of a given port and device.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @prio2tc: Array of TC associated with each priorities.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
+
+/**
+ * mlx4_SET_PORT_SCHEDULER - This routine configures the arbitration between
+ * traffic classes (ETS) and configured rate limit for traffic classes.
+ * tc_tx_bw, pg and ratelimit are arrays where each index represents a TC.
+ * The description for those parameters below refers to a single TC.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @tc_tx_bw: The percentage of the bandwidth allocated for traffic class
+ *  within a TC group. The sum of the bw_percentage of all the traffic
+ *  classes within a TC group must equal 100% for correct operation.
+ * @pg: The TC group the traffic class is associated with.
+ * @ratelimit: The maximal bandwidth allowed for the use by this traffic class.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+                           u8 *pg, u16 *ratelimit);
+/**
+ * mlx4_ALLOCATE_VPP_get - Query port VPP availible resources and allocation.
+ * Before distribution of VPPs to priorities, only availible_vpp is returned.
+ * After initialization it returns the distribution of VPPs among priorities.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @availible_vpp: Pointer to variable where number of availible VPPs is stored
+ * @vpp_p_up: Distribution of VPPs to priorities is stored in this array
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
+                         u16 *availible_vpp, u8 *vpp_p_up);
+/**
+ * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities.
+ * The total number of VPPs assigned to all for a port must not exceed
+ * the value reported by availible_vpp in mlx4_ALLOCATE_VPP_get.
+ * VPP allocation is allowed only after the port type has been set,
+ * and while no QPs are open for this port.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @vpp_p_up: Allocation of VPPs to different priorities.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up);
+
+/**
+ * mlx4_SET_VPORT_QOS_get - Query QoS proporties of a Vport.
+ * Each priority allowed for the Vport is assigned with a share of the BW,
+ * and a BW limitation. This commands query the current QoS values.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @vport: Vport id.
+ * @out_param: Array of mlx4_vport_qos_param that will contain the values.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
+                          struct mlx4_vport_qos_param *out_param);
+
+/**
+ * mlx4_SET_VPORT_QOS_set - Set QoS proporties of a Vport.
+ * QoS parameters can be modified at any time, but must be initialized
+ * before any QP is associated with the VPort.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @vport: Vport id.
+ * @out_param: Array of mlx4_vport_qos_param which holds the requested values.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
+                          struct mlx4_vport_qos_param *in_param);
+
+#endif /* MLX4_FW_QOS_H */
index 7e487223489a467071155f0e67ea052ba2b18949..acceb75e8c440c6aab8061cc1cdec7c0d420f4b1 100644 (file)
@@ -297,6 +297,25 @@ static int mlx4_dev_port(struct mlx4_dev *dev, int port,
        return err;
 }
 
+static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
+{
+       if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
+               return;
+
+       if (mlx4_is_mfunc(dev)) {
+               mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
+               dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+               return;
+       }
+
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
+               mlx4_dbg(dev,
+                        "Keep FCS is not supported - Disabling Ignore FCS");
+               dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+               return;
+       }
+}
+
 #define MLX4_A0_STEERING_TABLE_SIZE    256
 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
@@ -489,6 +508,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
        }
 
+       dev->caps.rl_caps = dev_cap->rl_caps;
+
        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
                dev->caps.dmfs_high_rate_qpn_range;
 
@@ -526,10 +547,20 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev->caps.alloc_res_qp_mask =
                        (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
                        MLX4_RESERVE_A0_QP;
+
+               if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
+                   dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
+                       mlx4_warn(dev, "Old device ETS support detected\n");
+                       mlx4_warn(dev, "Consider upgrading device FW.\n");
+                       dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
+               }
+
        } else {
                dev->caps.alloc_res_qp_mask = 0;
        }
 
+       mlx4_enable_ignore_fcs(dev);
+
        return 0;
 }
 
@@ -883,6 +914,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
 
        slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
+       mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
+                hca_param.rss_ip_frags ? "on" : "off");
 
        if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
            dev->caps.bf_reg_size)
index 1409d0cd6143e8554524c8377a018ccb1c3edba1..f30eeb730a8667d44bead81f19c606d6770d2bb9 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/doorbell.h>
 #include <linux/mlx4/cmd.h>
+#include "fw_qos.h"
 
 #define DRV_NAME       "mlx4_core"
 #define PFX            DRV_NAME ": "
 
 #define INIT_HCA_TPT_MW_ENABLE          (1 << 7)
 
-struct mlx4_set_port_prio2tc_context {
-       u8 prio2tc[4];
-};
-
-struct mlx4_port_scheduler_tc_cfg_be {
-       __be16 pg;
-       __be16 bw_precentage;
-       __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
-       __be16 max_bw_value;
-};
-
-struct mlx4_set_port_scheduler_context {
-       struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
-};
-
 enum {
        MLX4_HCR_BASE           = 0x80680,
        MLX4_HCR_SIZE           = 0x0001c,
@@ -175,7 +161,7 @@ enum mlx4_res_tracker_free_type {
 
 /*
  *Virtual HCR structures.
- * mlx4_vhcr is the sw representation, in machine endianess
+ * mlx4_vhcr is the sw representation, in machine endianness
  *
  * mlx4_vhcr_cmd is the formalized structure, the one that is passed
  * to FW to go through communication channel.
@@ -512,6 +498,7 @@ struct mlx4_vport_state {
        u32 tx_rate;
        bool spoofchk;
        u32 link_state;
+       u8 qos_vport;
 };
 
 struct mlx4_vf_admin_state {
@@ -568,6 +555,11 @@ struct mlx4_slave_event_eq {
        struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
 };
 
+struct mlx4_qos_manager {
+       int num_of_qos_vfs;
+       DECLARE_BITMAP(priority_bm, MLX4_NUM_UP);
+};
+
 struct mlx4_master_qp0_state {
        int proxy_qp0_active;
        int qp0_active;
@@ -592,6 +584,7 @@ struct mlx4_mfunc_master_ctx {
        struct mlx4_eqe         cmd_eqe;
        struct mlx4_slave_event_eq slave_eq;
        struct mutex            gen_eqe_mutex[MLX4_MFUNC_MAX];
+       struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_mfunc {
@@ -644,6 +637,7 @@ struct mlx4_vf_immed_vlan_work {
        int                     orig_vlan_ix;
        u8                      port;
        u8                      qos;
+       u8                      qos_vport;
        u16                     vlan_id;
        u16                     orig_vlan_id;
 };
@@ -769,9 +763,11 @@ enum {
 
 
 struct mlx4_set_port_general_context {
-       u8 reserved[3];
+       u16 reserved1;
+       u8 v_ignore_fcs;
        u8 flags;
-       u16 reserved2;
+       u8 ignore_fcs;
+       u8 reserved2;
        __be16 mtu;
        u8 pptx;
        u8 pfctx;
index ebbe244e80dde55068d924905e08d1d631821811..9de30216b146bb09188a6867307b5bbcf7aa9dd0 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/mlx4/cmd.h>
 
 #include "en_port.h"
+#include "mlx4_stats.h"
 
 #define DRV_NAME       "mlx4_en"
 #define DRV_VERSION    "2.2-1"
@@ -171,7 +172,6 @@ enum {
 /* Number of samples to 'average' */
 #define AVG_SIZE                       128
 #define AVG_FACTOR                     1024
-#define NUM_PERF_STATS                 NUM_PERF_COUNTERS
 
 #define INC_PERF_COUNTER(cnt)          (++(cnt))
 #define ADD_PERF_COUNTER(cnt, add)     ((cnt) += (add))
@@ -182,7 +182,6 @@ enum {
 
 #else
 
-#define NUM_PERF_STATS                 0
 #define INC_PERF_COUNTER(cnt)          do {} while (0)
 #define ADD_PERF_COUNTER(cnt, add)     do {} while (0)
 #define AVG_PERF_COUNTER(cnt, sample)  do {} while (0)
@@ -435,37 +434,6 @@ struct mlx4_en_port_state {
        u32 flags;
 };
 
-struct mlx4_en_pkt_stats {
-       unsigned long broadcast;
-       unsigned long rx_prio[8];
-       unsigned long tx_prio[8];
-#define NUM_PKT_STATS          17
-};
-
-struct mlx4_en_port_stats {
-       unsigned long tso_packets;
-       unsigned long xmit_more;
-       unsigned long queue_stopped;
-       unsigned long wake_queue;
-       unsigned long tx_timeout;
-       unsigned long rx_alloc_failed;
-       unsigned long rx_chksum_good;
-       unsigned long rx_chksum_none;
-       unsigned long rx_chksum_complete;
-       unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS         10
-};
-
-struct mlx4_en_perf_stats {
-       u32 tx_poll;
-       u64 tx_pktsz_avg;
-       u32 inflight_avg;
-       u16 tx_coal_avg;
-       u16 rx_coal_avg;
-       u32 napi_quota;
-#define NUM_PERF_COUNTERS              6
-};
-
 enum mlx4_en_mclist_act {
        MCLIST_NONE,
        MCLIST_REM,
@@ -514,9 +482,15 @@ enum {
        MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP        = (1 << 5),
 };
 
+#define PORT_BEACON_MAX_LIMIT (65535)
 #define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
 #define MLX4_EN_MAC_HASH_IDX 5
 
+struct mlx4_en_stats_bitmap {
+       DECLARE_BITMAP(bitmap, NUM_ALL_STATS);
+       struct mutex mutex; /* for mutual access to stats bitmap */
+};
+
 struct mlx4_en_priv {
        struct mlx4_en_dev *mdev;
        struct mlx4_en_port_profile *prof;
@@ -592,8 +566,12 @@ struct mlx4_en_priv {
 #endif
        struct mlx4_en_perf_stats pstats;
        struct mlx4_en_pkt_stats pkstats;
+       struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
+       struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES];
+       struct mlx4_en_flow_stats_rx rx_flowstats;
+       struct mlx4_en_flow_stats_tx tx_flowstats;
        struct mlx4_en_port_stats port_stats;
-       u64 stats_bitmap;
+       struct mlx4_en_stats_bitmap stats_bitmap;
        struct list_head mc_list;
        struct list_head curr_list;
        u64 broadcast_id;
@@ -608,6 +586,7 @@ struct mlx4_en_priv {
 #ifdef CONFIG_MLX4_EN_DCB
        struct ieee_ets ets;
        u16 maxrate[IEEE_8021QAZ_MAX_TCS];
+       enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
 #endif
 #ifdef CONFIG_RFS_ACCEL
        spinlock_t filters_lock;
@@ -761,6 +740,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 int mlx4_en_start_port(struct net_device *dev);
 void mlx4_en_stop_port(struct net_device *dev, int detach);
 
+void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
+                             struct mlx4_en_stats_bitmap *stats_bitmap,
+                             u8 rx_ppp, u8 rx_pause,
+                             u8 tx_ppp, u8 tx_pause);
+
 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
 
@@ -846,7 +830,10 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
 int mlx4_en_reset_config(struct net_device *dev,
                         struct hwtstamp_config ts_config,
                         netdev_features_t new_features);
-
+void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
+                                    struct mlx4_en_stats_bitmap *stats_bitmap,
+                                    u8 rx_ppp, u8 rx_pause,
+                                    u8 tx_ppp, u8 tx_pause);
 int mlx4_en_netdev_event(struct notifier_block *this,
                         unsigned long event, void *ptr);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
new file mode 100644 (file)
index 0000000..0055583
--- /dev/null
@@ -0,0 +1,107 @@
+#ifndef _MLX4_STATS_
+#define _MLX4_STATS_
+
+#ifdef MLX4_EN_PERF_STAT
+#define NUM_PERF_STATS                 NUM_PERF_COUNTERS
+#else
+#define NUM_PERF_STATS                 0
+#endif
+
+#define NUM_PRIORITIES 9
+#define NUM_PRIORITY_STATS 2
+
+struct mlx4_en_pkt_stats {
+       unsigned long rx_multicast_packets;
+       unsigned long rx_broadcast_packets;
+       unsigned long rx_jabbers;
+       unsigned long rx_in_range_length_error;
+       unsigned long rx_out_range_length_error;
+       unsigned long tx_multicast_packets;
+       unsigned long tx_broadcast_packets;
+       unsigned long rx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
+       unsigned long tx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
+#define NUM_PKT_STATS          43
+};
+
+struct mlx4_en_port_stats {
+       unsigned long tso_packets;
+       unsigned long xmit_more;
+       unsigned long queue_stopped;
+       unsigned long wake_queue;
+       unsigned long tx_timeout;
+       unsigned long rx_alloc_failed;
+       unsigned long rx_chksum_good;
+       unsigned long rx_chksum_none;
+       unsigned long rx_chksum_complete;
+       unsigned long tx_chksum_offload;
+#define NUM_PORT_STATS         10
+};
+
+struct mlx4_en_perf_stats {
+       u32 tx_poll;
+       u64 tx_pktsz_avg;
+       u32 inflight_avg;
+       u16 tx_coal_avg;
+       u16 rx_coal_avg;
+       u32 napi_quota;
+#define NUM_PERF_COUNTERS              6
+};
+
+#define NUM_MAIN_STATS 21
+
+#define MLX4_NUM_PRIORITIES    8
+
+struct mlx4_en_flow_stats_rx {
+       u64 rx_pause;
+       u64 rx_pause_duration;
+       u64 rx_pause_transition;
+#define NUM_FLOW_STATS_RX      3
+#define NUM_FLOW_PRIORITY_STATS_RX     (NUM_FLOW_STATS_RX * \
+                                        MLX4_NUM_PRIORITIES)
+};
+
+struct mlx4_en_flow_stats_tx {
+       u64 tx_pause;
+       u64 tx_pause_duration;
+       u64 tx_pause_transition;
+#define NUM_FLOW_STATS_TX      3
+#define NUM_FLOW_PRIORITY_STATS_TX     (NUM_FLOW_STATS_TX * \
+                                        MLX4_NUM_PRIORITIES)
+};
+
+#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \
+                       NUM_FLOW_PRIORITY_STATS_TX + \
+                       NUM_FLOW_PRIORITY_STATS_RX)
+
+struct mlx4_en_stat_out_flow_control_mbox {
+       /* Total number of PAUSE frames received from the far-end port */
+       __be64 rx_pause;
+       /* Total number of microseconds that far-end port requested to pause
+       * transmission of packets
+       */
+       __be64 rx_pause_duration;
+       /* Number of received transmission from XOFF state to XON state */
+       __be64 rx_pause_transition;
+       /* Total number of PAUSE frames sent from the far-end port */
+       __be64 tx_pause;
+       /* Total time in microseconds that transmission of packets has been
+       * paused
+       */
+       __be64 tx_pause_duration;
+       /* Number of transmitter transitions from XOFF state to XON state */
+       __be64 tx_pause_transition;
+       /* Reserverd */
+       __be64 reserved[2];
+};
+
+enum {
+       MLX4_DUMP_ETH_STATS_FLOW_CONTROL = 1 << 12
+};
+
+#define NUM_ALL_STATS  (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
+                        NUM_FLOW_STATS + NUM_PERF_STATS)
+
+#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
+                                 sizeof(((struct net_device_stats *)0)->n))
+
+#endif
index 9f268f05290aa7b6491a8eb1a3a8adc636e2ac56..c2b21313dba7f64d0e51cff8ca3c601720a848ef 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/mlx4/cmd.h>
 
 #include "mlx4.h"
+#include "mlx4_stats.h"
 
 #define MLX4_MAC_VALID         (1ull << 63)
 
@@ -49,6 +50,9 @@
 #define MLX4_STATS_ERROR_COUNTERS_MASK         0x1ffc30ULL
 #define MLX4_STATS_PORT_COUNTERS_MASK          0x1fe00000ULL
 
+#define MLX4_FLAG_V_IGNORE_FCS_MASK            0x2
+#define MLX4_IGNORE_FCS_MASK                   0x1
+
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
 {
        int i;
@@ -127,8 +131,9 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
 
        in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
 
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
@@ -341,8 +346,9 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
 
        memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
        in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -629,9 +635,9 @@ static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
                       MLX4_ROCE_GID_ENTRY_SIZE);
 
        err = mlx4_cmd(dev, mailbox->dma,
-                      ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
-                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
-                      MLX4_CMD_NATIVE);
+                      ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
+                      MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
        mutex_unlock(&(priv->port[port].gid_table.mutex));
        return err;
 }
@@ -837,6 +843,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
                                MLX4_CMD_NATIVE);
        }
 
+       /* Slaves are not allowed to SET_PORT beacon (LED) blink */
+       if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
+               mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
+               return -EPERM;
+       }
+
        /* For IB, we only consider:
         * - The capability mask, which is set to the aggregate of all
         *   slave function capabilities
@@ -945,8 +957,9 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
                        (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
                        (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
                        (vl_cap << MLX4_SET_PORT_VL_CAP));
-               err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
-                               MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+               err = mlx4_cmd(dev, mailbox->dma, port,
+                              MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
+                              MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
                if (err != -ENOMEM)
                        break;
        }
@@ -975,8 +988,9 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
        context->pfcrx = pfcrx;
 
        in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
@@ -1012,84 +1026,40 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
        context->vlan_miss = MLX4_VLAN_MISS_IDX;
 
        in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
 
-int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
+int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
 {
        struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_prio2tc_context *context;
-       int err;
+       struct mlx4_set_port_general_context *context;
        u32 in_mod;
-       int i;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       context = mailbox->buf;
-       for (i = 0; i < MLX4_NUM_UP; i += 2)
-               context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
-
-       in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
-
-int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
-               u8 *pg, u16 *ratelimit)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_scheduler_context *context;
        int err;
-       u32 in_mod;
-       int i;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        context = mailbox->buf;
+       context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK;
+       if (ignore_fcs_value)
+               context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
+       else
+               context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
 
-       for (i = 0; i < MLX4_NUM_TC; i++) {
-               struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
-               u16 r;
-
-               if (ratelimit && ratelimit[i]) {
-                       if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
-                               r = ratelimit[i];
-                               tc->max_bw_units =
-                                       htons(MLX4_RATELIMIT_100M_UNITS);
-                       } else {
-                               r = ratelimit[i]/10;
-                               tc->max_bw_units =
-                                       htons(MLX4_RATELIMIT_1G_UNITS);
-                       }
-                       tc->max_bw_value = htons(r);
-               } else {
-                       tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
-                       tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
-               }
-
-               tc->pg = htons(pg[i]);
-               tc->bw_precentage = htons(tc_tx_bw[i]);
-       }
-
-       in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
+       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
        err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
-EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
 
 enum {
        VXLAN_ENABLE_MODIFY     = 1 << 7,
@@ -1125,14 +1095,35 @@ int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
        context->steering  = steering;
 
        in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
 EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
 
+int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
+{
+       int err;
+       struct mlx4_cmd_mailbox *mailbox;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       *((__be32 *)mailbox->buf) = cpu_to_be32(time);
+
+       err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
+
 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
                                struct mlx4_vhcr *vhcr,
                                struct mlx4_cmd_mailbox *inbox,
@@ -1184,22 +1175,6 @@ int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
                                          vhcr->in_modifier, outbox);
 }
 
-void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
-{
-       if (!mlx4_is_mfunc(dev)) {
-               *stats_bitmap = 0;
-               return;
-       }
-
-       *stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
-                        MLX4_STATS_TRAFFIC_DROPS_MASK |
-                        MLX4_STATS_PORT_COUNTERS_MASK);
-
-       if (mlx4_is_master(dev))
-               *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
-}
-EXPORT_SYMBOL(mlx4_set_stats_bitmap);
-
 int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
                                 int *slave_id)
 {
index eda29dbbfcd259824f0a0fbec3876975f215d2e2..b75214a80d0e5be03ccc4bb9e52049ea25e225f6 100644 (file)
@@ -442,6 +442,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                        cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
        }
 
+       if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
+               qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
+               cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
+       }
+
+       if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+               qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
+               cmd->qp_context.qos_vport = params->qos_vport;
+       }
+
        cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
        cmd->qp_mask = cpu_to_be64(qp_mask);
 
index 6e413ac4e94011c4ea85993b8471d5adfc84d5d3..c7f28bf4b8e21436cc927c8212c5cc6b57706e51 100644 (file)
@@ -221,11 +221,6 @@ struct res_fs_rule {
        int                     qpn;
 };
 
-static int mlx4_is_eth(struct mlx4_dev *dev, int port)
-{
-       return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
-}
-
 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
 {
        struct rb_node *node = root->rb_node;
@@ -770,6 +765,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
+               qpc->qos_vport = vp_oper->state.qos_vport;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
@@ -2947,8 +2943,12 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
 
-       if (slave != mlx4_master_func_num(dev))
+       if (slave != mlx4_master_func_num(dev)) {
                qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
+               /* setting QP rate-limit is disallowed for VFs */
+               if (qp_ctx->rate_limit_params)
+                       return -EPERM;
+       }
 
        switch (qp_type) {
        case MLX4_QP_ST_RC:
@@ -3027,7 +3027,7 @@ int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
 
        /* Call the SW implementation of write_mtt:
         * - Prepare a dummy mtt struct
-        * - Translate inbox contents to simple addresses in host endianess */
+        * - Translate inbox contents to simple addresses in host endianness */
        mtt.offset = 0;  /* TBD this is broken but I don't handle it since
                            we don't really use it */
        mtt.order = 0;
@@ -4918,6 +4918,11 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
                                        qp->sched_queue & 0xC7;
                                upd_context->qp_context.pri_path.sched_queue |=
                                        ((work->qos & 0x7) << 3);
+                               upd_context->qp_mask |=
+                                       cpu_to_be64(1ULL <<
+                                                   MLX4_UPD_QP_MASK_QOS_VPP);
+                               upd_context->qp_context.qos_vport =
+                                       work->qos_vport;
                        }
 
                        err = mlx4_cmd(dev, mailbox->dma,
index 201ca6d76ce563862b6f2d620b68c69e3e58002a..ac0f7bf4be958bef168c0281f05108f6287304f4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -171,6 +171,9 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
        db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
        db->dma     = pgdir->db_dma  + offset;
 
+       db->db[0] = 0;
+       db->db[1] = 0;
+
        return 0;
 }
 
index a2853057c779529b0a226e5aa54adfbc10c3a645..e3273faf4568945cb494e6598dbc013e61b11919 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -125,7 +125,10 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
        u8 token;
 
        spin_lock(&cmd->token_lock);
-       token = cmd->token++ % 255 + 1;
+       cmd->token++;
+       if (cmd->token == 0)
+               cmd->token++;
+       token = cmd->token;
        spin_unlock(&cmd->token_lock);
 
        return token;
@@ -515,10 +518,11 @@ static void cmd_work_handler(struct work_struct *work)
        ent->ts1 = ktime_get_ns();
 
        /* ring doorbell after the descriptor is valid */
+       mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
-       mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
        mmiowb();
+       /* if not in polling don't use ent after this point */
        if (cmd->mode == CMD_MODE_POLLING) {
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
@@ -1236,7 +1240,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                goto out_out;
        }
 
-       err = mlx5_copy_from_msg(out, outb, out_size);
+       if (!callback)
+               err = mlx5_copy_from_msg(out, outb, out_size);
 
 out_out:
        if (!callback)
@@ -1319,6 +1324,45 @@ ex_err:
        return err;
 }
 
+static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
+{
+       struct device *ddev = &dev->pdev->dev;
+
+       cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+                                                &cmd->alloc_dma, GFP_KERNEL);
+       if (!cmd->cmd_alloc_buf)
+               return -ENOMEM;
+
+       /* make sure it is aligned to 4K */
+       if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
+               cmd->cmd_buf = cmd->cmd_alloc_buf;
+               cmd->dma = cmd->alloc_dma;
+               cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
+               return 0;
+       }
+
+       dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
+                         cmd->alloc_dma);
+       cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
+                                                2 * MLX5_ADAPTER_PAGE_SIZE - 1,
+                                                &cmd->alloc_dma, GFP_KERNEL);
+       if (!cmd->cmd_alloc_buf)
+               return -ENOMEM;
+
+       cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
+       cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
+       cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
+       return 0;
+}
+
+static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
+{
+       struct device *ddev = &dev->pdev->dev;
+
+       dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
+                         cmd->alloc_dma);
+}
+
 int mlx5_cmd_init(struct mlx5_core_dev *dev)
 {
        int size = sizeof(struct mlx5_cmd_prot_block);
@@ -1341,17 +1385,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        if (!cmd->pool)
                return -ENOMEM;
 
-       cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
-       if (!cmd->cmd_buf) {
-               err = -ENOMEM;
+       err = alloc_cmd_page(dev, cmd);
+       if (err)
                goto err_free_pool;
-       }
-       cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
-                                 DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
-               err = -ENOMEM;
-               goto err_free;
-       }
 
        cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
        cmd->log_sz = cmd_l >> 4 & 0xf;
@@ -1360,13 +1396,13 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
                        1 << cmd->log_sz);
                err = -EINVAL;
-               goto err_map;
+               goto err_free_page;
        }
 
        if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
                dev_err(&dev->pdev->dev, "command queue size overflow\n");
                err = -EINVAL;
-               goto err_map;
+               goto err_free_page;
        }
 
        cmd->checksum_disabled = 1;
@@ -1378,7 +1414,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
                        CMD_IF_REV, cmd->cmdif_rev);
                err = -ENOTSUPP;
-               goto err_map;
+               goto err_free_page;
        }
 
        spin_lock_init(&cmd->alloc_lock);
@@ -1394,7 +1430,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        if (cmd_l & 0xfff) {
                dev_err(&dev->pdev->dev, "invalid command queue address\n");
                err = -ENOMEM;
-               goto err_map;
+               goto err_free_page;
        }
 
        iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
@@ -1410,7 +1446,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        err = create_msg_cache(dev);
        if (err) {
                dev_err(&dev->pdev->dev, "failed to create command cache\n");
-               goto err_map;
+               goto err_free_page;
        }
 
        set_wqname(dev);
@@ -1435,11 +1471,8 @@ err_wq:
 err_cache:
        destroy_msg_cache(dev);
 
-err_map:
-       dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
-                        DMA_BIDIRECTIONAL);
-err_free:
-       free_pages((unsigned long)cmd->cmd_buf, 0);
+err_free_page:
+       free_cmd_page(dev, cmd);
 
 err_free_pool:
        pci_pool_destroy(cmd->pool);
@@ -1455,9 +1488,7 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
        clean_debug_files(dev);
        destroy_workqueue(cmd->wq);
        destroy_msg_cache(dev);
-       dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
-                        DMA_BIDIRECTIONAL);
-       free_pages((unsigned long)cmd->cmd_buf, 0);
+       free_cmd_page(dev, cmd);
        pci_pool_destroy(cmd->pool);
 }
 EXPORT_SYMBOL(mlx5_cmd_cleanup);
index 43c5f48095260b5966d7e7dd7b6bf654bf5249e3..eb0cf81f5f4518a06579a6c52e191b72ad1d0e50 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 4878025e231c6b4ebb9f63c72ec3fcdf1402150b..5210d92e6bc7252a4989082e17ac1953490b4af9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index da82991239a8cb0df3f74e1a14d36037ed2d9a18..dbf190d9b9adb57c9108a5dcbea8dd80b6e25d5e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 06f9036acd836196424b294ea47ce4dc624cbe09..4b4cda3bcc5fa1eecf99b6ed5265d41cc40dfedc 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 3e6670c4a7cd215998bf2302f2573acb8540ffb7..292d76f2a9041105bceb9ae0a792fe4eb05c53d2 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index fd80ecfa7195195464b8ccd2f0cfc1bc556d96c2..ee1b0b965f34a3f4e29a71c79daf40e47693d67c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 5394a848655876c2d0435ed6f8934363a060c303..28425e5ea91f871670e84721bb865c1725472e80 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
 #include "mlx5_core.h"
 
 #define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "2.2-1"
-#define DRIVER_RELDATE "Feb 2014"
+#define DRIVER_VERSION "3.0"
+#define DRIVER_RELDATE  "January 2015"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
+MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRIVER_VERSION);
 
@@ -288,8 +288,6 @@ static void copy_rw_fields(void *to, struct mlx5_caps *from)
        MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
        MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
        MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
-       MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
-       MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
        MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
        MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
        v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
@@ -509,6 +507,87 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
        return 0;
 }
 
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
+{
+       struct mlx5_eq_table *table = &dev->priv.eq_table;
+       struct mlx5_eq *eq, *n;
+       int err = -ENOENT;
+
+       spin_lock(&table->lock);
+       list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+               if (eq->index == vector) {
+                       *eqn = eq->eqn;
+                       *irqn = eq->irqn;
+                       err = 0;
+                       break;
+               }
+       }
+       spin_unlock(&table->lock);
+
+       return err;
+}
+EXPORT_SYMBOL(mlx5_vector2eqn);
+
+static void free_comp_eqs(struct mlx5_core_dev *dev)
+{
+       struct mlx5_eq_table *table = &dev->priv.eq_table;
+       struct mlx5_eq *eq, *n;
+
+       spin_lock(&table->lock);
+       list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+               list_del(&eq->list);
+               spin_unlock(&table->lock);
+               if (mlx5_destroy_unmap_eq(dev, eq))
+                       mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
+                                      eq->eqn);
+               kfree(eq);
+               spin_lock(&table->lock);
+       }
+       spin_unlock(&table->lock);
+}
+
+static int alloc_comp_eqs(struct mlx5_core_dev *dev)
+{
+       struct mlx5_eq_table *table = &dev->priv.eq_table;
+       char name[MLX5_MAX_EQ_NAME];
+       struct mlx5_eq *eq;
+       int ncomp_vec;
+       int nent;
+       int err;
+       int i;
+
+       INIT_LIST_HEAD(&table->comp_eqs_list);
+       ncomp_vec = table->num_comp_vectors;
+       nent = MLX5_COMP_EQ_SIZE;
+       for (i = 0; i < ncomp_vec; i++) {
+               eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+               if (!eq) {
+                       err = -ENOMEM;
+                       goto clean;
+               }
+
+               snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+               err = mlx5_create_map_eq(dev, eq,
+                                        i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
+                                        name, &dev->priv.uuari.uars[0]);
+               if (err) {
+                       kfree(eq);
+                       goto clean;
+               }
+               mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
+               eq->index = i;
+               spin_lock(&table->lock);
+               list_add_tail(&eq->list, &table->comp_eqs_list);
+               spin_unlock(&table->lock);
+       }
+
+       return 0;
+
+clean:
+       free_comp_eqs(dev);
+       return err;
+}
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
        struct mlx5_priv *priv = &dev->priv;
@@ -645,6 +724,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
                goto err_free_uar;
        }
 
+       err = alloc_comp_eqs(dev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
+               goto err_stop_eqs;
+       }
+
        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
 
        mlx5_init_cq_table(dev);
@@ -654,6 +739,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
        return 0;
 
+err_stop_eqs:
+       mlx5_stop_eqs(dev);
+
 err_free_uar:
        mlx5_free_uuars(dev, &priv->uuari);
 
@@ -697,7 +785,6 @@ err_dbg:
        debugfs_remove(priv->dbg_root);
        return err;
 }
-EXPORT_SYMBOL(mlx5_dev_init);
 
 static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
 {
@@ -706,6 +793,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
+       free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_free_uuars(dev, &priv->uuari);
        mlx5_eq_cleanup(dev);
@@ -820,6 +908,28 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
 }
 EXPORT_SYMBOL(mlx5_unregister_interface);
 
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+       struct mlx5_priv *priv = &mdev->priv;
+       struct mlx5_device_context *dev_ctx;
+       unsigned long flags;
+       void *result = NULL;
+
+       spin_lock_irqsave(&priv->ctx_lock, flags);
+
+       list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+               if ((dev_ctx->intf->protocol == protocol) &&
+                   dev_ctx->intf->get_dev) {
+                       result = dev_ctx->intf->get_dev(dev_ctx->context);
+                       break;
+               }
+
+       spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+       return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
 static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                            unsigned long param)
 {
index 44837640bd7ca45c1380d6d3fe1f4bc9ecceee51..d79fd85d1dd50c6e991eb9659d426839e4e013a8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index f0c9f9a7a36142f1a7fded7a88120e1cff213aaa..a051b906afdf1a3fb8059c9567fe8378b4818cf9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 184c3615f4799bbda0adc0da0e265cacc6ae8bd6..1adb300dd850691eaafbcf5c5f68212090e585cd 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(mlx5_core_destroy_mkey);
 int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
                         struct mlx5_query_mkey_mbox_out *out, int outlen)
 {
-       struct mlx5_destroy_mkey_mbox_in in;
+       struct mlx5_query_mkey_mbox_in in;
        int err;
 
        memset(&in, 0, sizeof(in));
index 4fdaae9b54d99f56f21a78706776805303ba5ed9..df2238372ea73a0d71b39450cd816810bbcdd1ad 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -243,8 +243,9 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
        struct page *page;
        u64 addr;
        int err;
+       int nid = dev_to_node(&dev->pdev->dev);
 
-       page = alloc_page(GFP_HIGHUSER);
+       page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
        if (!page) {
                mlx5_core_warn(dev, "failed to allocate page\n");
                return -ENOMEM;
index 790da5c4ca4f4ab0c5b47d036943afdd9d9aafbf..f2d3aee909e8be2051c8d7f8207c50856b93e394 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 72c2d002c3b8f65e78b89733c87c44d3ba702089..49e90f2612d8c0b8803e635acfb4c92b51044c8a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 575d853dbe05d1e9e8e3255761a673ff72dc5c30..dc7dbf7e9d98f28d83d55b6632ec44fe4275d9ce 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 38bce93f8314f202145277ed749d09f07f352dfb..f9d25dcd03c1e2616be6434cae3a9146d1e83df5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 06801d6f595ef99f0d18cc91d37dcac62be445db..5a89bb1d678a8e5ae6002a6ec9122bbd97d19085 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 10988fbf47ebbeffd17cd8d1fb4d6a1c77c29405..6f332ebdf3b5a812dd34be59a072e497af380975 100644 (file)
@@ -4144,7 +4144,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
 
        for (i = 0; i < hw->addr_list_size; i++) {
                if (ether_addr_equal(hw->address[i], mac_addr)) {
-                       memset(hw->address[i], 0, ETH_ALEN);
+                       eth_zero_addr(hw->address[i]);
                        writel(0, hw->io + ADD_ADDR_INCR * i +
                                KS_ADD_ADDR_0_HI);
                        return 0;
index 6c72e74fef3e01a7189a81e2e1fa2355a4f2bdbd..81d0f1c86d6dee1243d5d65a7e499767698844eb 100644 (file)
@@ -150,7 +150,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
 
        priv->rx_head = 0;
 
-       /* reset the MAC controler TX/RX desciptor base address */
+       /* reset the MAC controller TX/RX desciptor base address */
        writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
        writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
 }
index a4cdf2f8041a735de3a199d5e0ba6c5040a02b4e..092dcae0d4a969523e7cd99dfd3380398c3e7c8c 100644 (file)
@@ -1343,7 +1343,7 @@ static int init_nic(struct s2io_nic *nic)
                TX_PA_CFG_IGNORE_L2_ERR;
        writeq(val64, &bar0->tx_pa_cfg);
 
-       /* Rx DMA intialization. */
+       /* Rx DMA initialization. */
        val64 = 0;
        for (i = 0; i < config->rx_ring_num; i++) {
                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
index b07d552a27d4d486079b329b238235411a103e4e..be916eb2f2e7304dbbfa35d8df22b618298ca3bc 100644 (file)
 
 #include "vxge-ethtool.h"
 
+static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
+       {"\n DRIVER STATISTICS"},
+       {"vpaths_opened"},
+       {"vpath_open_fail_cnt"},
+       {"link_up_cnt"},
+       {"link_down_cnt"},
+       {"tx_frms"},
+       {"tx_errors"},
+       {"tx_bytes"},
+       {"txd_not_free"},
+       {"txd_out_of_desc"},
+       {"rx_frms"},
+       {"rx_errors"},
+       {"rx_bytes"},
+       {"rx_mcast"},
+       {"pci_map_fail_cnt"},
+       {"skb_alloc_fail_cnt"}
+};
+
 /**
  * vxge_ethtool_sset - Sets different link parameters.
  * @dev: device pointer.
index 6cf3044d7f438283d6f6455aba047322a151544d..065a2c0429a404aabfc0f8205c2ec7c13ca26d84 100644 (file)
 /* Ethtool related variables and Macros. */
 static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
 
-static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
-       {"\n DRIVER STATISTICS"},
-       {"vpaths_opened"},
-       {"vpath_open_fail_cnt"},
-       {"link_up_cnt"},
-       {"link_down_cnt"},
-       {"tx_frms"},
-       {"tx_errors"},
-       {"tx_bytes"},
-       {"txd_not_free"},
-       {"txd_out_of_desc"},
-       {"rx_frms"},
-       {"rx_errors"},
-       {"rx_bytes"},
-       {"rx_mcast"},
-       {"pci_map_fail_cnt"},
-       {"skb_alloc_fail_cnt"}
-};
-
 #define VXGE_TITLE_LEN                 5
 #define VXGE_HW_VPATH_STATS_LEN        27
 #define VXGE_HW_AGGR_STATS_LEN         13
index d36599f47af5bb0ac2978b6f4db5575d0d6ebac8..7bf9c028d8d7fea824859142d81307d87e056fdd 100644 (file)
@@ -1557,7 +1557,7 @@ static int octeon_mgmt_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id octeon_mgmt_match[] = {
+static const struct of_device_id octeon_mgmt_match[] = {
        {
                .compatible = "cavium,octeon-5750-mix",
        },
index 4fe8ea96bd25d24f1f2296d22230ceebda1e8f64..f6fcf7450352631ad34f7c052fbe5ac960238297 100644 (file)
@@ -394,7 +394,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev,
 }
 
 /**
- * pch_gbe_set_pauseparam - Set pause paramters
+ * pch_gbe_set_pauseparam - Set pause parameters
  * @netdev:  Network interface device structure
  * @pause:   Pause parameters structure
  * Returns:
index 319d9d40f922e4616945de4ad46927d707668607..13d88a6025c82a89fb290bc6930cd4055bd1535b 100644 (file)
@@ -350,7 +350,7 @@ V.  Recent Changes
     incorrectly defined and corrected (as per Michel Mueller).
 
 02/23/1999 EPK Corrected the Tx full check to check that at least 4 slots
-    were available before reseting the tbusy and tx_full flags
+    were available before resetting the tbusy and tx_full flags
     (as per Michel Mueller).
 
 03/11/1999 EPK Added Pete Wyckoff's hardware checksumming support.
index 716fc37ada5a961677b577b0c693427ff8b32ee6..db80eb1c6d4fc5ebccea52aa86e87a7578ead04f 100644 (file)
@@ -537,7 +537,7 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev)
        u8 null_addr[ETH_ALEN];
        int i;
 
-       memset(null_addr, 0, ETH_ALEN);
+       eth_zero_addr(null_addr);
 
        if (netdev->flags & IFF_PROMISC) {
 
index f3346a3779d3c36c2f91d4703feb75dc500e39df..69f828eb42cf3762f525ee492d0abe9d5d33d1e7 100644 (file)
@@ -205,7 +205,7 @@ struct qlcnic_add_rings_mbx_out {
  * @phys_addr_{low|high}: DMA address of the transmit buffer
  * @cnsmr_index_{low|high}: host consumer index
  * @size: legth of transmit buffer ring
- * @intr_id: interrput id
+ * @intr_id: interrupt id
  * @src: src of interrupt
  */
 struct qlcnic_tx_mbx {
index 2bb48d57e7a51856225b7cbb7fa7bb0ebca7e510..33669c29b341cb42bb106ec2c634663d0adb2415 100644 (file)
@@ -269,7 +269,7 @@ static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter,
        }
 
        QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0);
-       /* Clear gracefull reset bit */
+       /* Clear graceful reset bit */
        val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
        val &= ~QLC_83XX_IDC_GRACEFULL_RESET;
        QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
@@ -889,7 +889,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
  * @adapter: adapter structure
  *
  * Device will remain in this state until:
- *     Reset request ACK's are recieved from all the functions
+ *     Reset request ACK's are received from all the functions
  *     Wait time exceeds max time limit
  *
  * Returns: Error code or Success(0)
index a430a34a4434aa78a87cb43d90c805fa83ac8252..367f3976df5690d71ba845214423296913aa07dd 100644 (file)
@@ -507,6 +507,7 @@ static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
                                               struct net_device *dev,
                                               netdev_features_t features)
 {
+       features = vlan_features_check(skb, features);
        return vxlan_features_check(skb, features);
 }
 #endif
index 8011ef3e7707f783f4caf9f1c16f3d7be3410c4f..25800a1dedcb9fbe0635e80386521dc789575fba 100644 (file)
@@ -460,7 +460,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                             "Set Mac addr %pM\n", addr);
        } else {
-               memset(zero_mac_addr, 0, ETH_ALEN);
+               eth_zero_addr(zero_mac_addr);
                addr = &zero_mac_addr[0];
                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                             "Clearing MAC address\n");
index 2c811f66d5acc47da86407222c4eea21d14954d0..4a42e960d331e66ea525a3d7c5755ce29b9e6f6f 100644 (file)
@@ -571,7 +571,7 @@ qcaspi_spi_thread(void *data)
                        }
 
                        /* can only handle other interrupts
-                        * if sync has occured
+                        * if sync has occurred
                         */
                        if (qca->sync == QCASPI_SYNC_READY) {
                                if (intr_cause & SPI_INT_PKT_AVLBL)
index 736d5d1624a142e902d6023cf3ee801c5169fa14..7fb244f565b283b0c130caa7476a8e4339564283 100644 (file)
                NETIF_MSG_RX_ERR| \
                NETIF_MSG_TX_ERR)
 
+#define SH_ETH_OFFSET_DEFAULTS                 \
+       [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
+
 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
+       SH_ETH_OFFSET_DEFAULTS,
+
        [EDSR]          = 0x0000,
        [EDMR]          = 0x0400,
        [EDTRR]         = 0x0408,
@@ -132,9 +137,6 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
        [TSU_POST3]     = 0x0078,
        [TSU_POST4]     = 0x007c,
        [TSU_ADRH0]     = 0x0100,
-       [TSU_ADRL0]     = 0x0104,
-       [TSU_ADRH31]    = 0x01f8,
-       [TSU_ADRL31]    = 0x01fc,
 
        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
@@ -151,6 +153,8 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
+       SH_ETH_OFFSET_DEFAULTS,
+
        [EDSR]          = 0x0000,
        [EDMR]          = 0x0400,
        [EDTRR]         = 0x0408,
@@ -199,9 +203,6 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
        [TSU_ADRH0]     = 0x0100,
-       [TSU_ADRL0]     = 0x0104,
-       [TSU_ADRH31]    = 0x01f8,
-       [TSU_ADRL31]    = 0x01fc,
 
        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
@@ -210,6 +211,8 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
+       SH_ETH_OFFSET_DEFAULTS,
+
        [ECMR]          = 0x0300,
        [RFLR]          = 0x0308,
        [ECSR]          = 0x0310,
@@ -256,6 +259,8 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
+       SH_ETH_OFFSET_DEFAULTS,
+
        [ECMR]          = 0x0100,
        [RFLR]          = 0x0108,
        [ECSR]          = 0x0110,
@@ -308,6 +313,8 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+       SH_ETH_OFFSET_DEFAULTS,
+
        [EDMR]          = 0x0000,
        [EDTRR]         = 0x0004,
        [EDRRR]         = 0x0008,
@@ -392,8 +399,6 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        [FWALCR1]       = 0x00b4,
 
        [TSU_ADRH0]     = 0x0100,
-       [TSU_ADRL0]     = 0x0104,
-       [TSU_ADRL31]    = 0x01fc,
 };
 
 static void sh_eth_rcv_snd_disable(struct net_device *ndev);
@@ -588,6 +593,7 @@ static struct sh_eth_cpu_data sh7757_data = {
        .no_ade         = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
+       .rtrate         = 1,
 };
 
 #define SH_GIGA_ETH_BASE       0xfee00000UL
@@ -1411,6 +1417,9 @@ static int sh_eth_txfree(struct net_device *ndev)
                        break;
                /* TACT bit must be checked before all the following reads */
                rmb();
+               netif_info(mdp, tx_done, ndev,
+                          "tx entry %d status 0x%08x\n",
+                          entry, edmac_to_cpu(mdp, txdesc->status));
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
                        dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1456,6 +1465,10 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                if (--boguscnt < 0)
                        break;
 
+               netif_info(mdp, rx_status, ndev,
+                          "rx entry %d status 0x%08x len %d\n",
+                          entry, desc_status, pkt_len);
+
                if (!(desc_status & RDFEND))
                        ndev->stats.rx_length_errors++;
 
@@ -1500,6 +1513,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        netif_receive_skb(skb);
                        ndev->stats.rx_packets++;
                        ndev->stats.rx_bytes += pkt_len;
+                       if (desc_status & RD_RFS8)
+                               ndev->stats.multicast++;
                }
                entry = (++mdp->cur_rx) % mdp->num_rx_ring;
                rxdesc = &mdp->rx_ring[entry];
@@ -1542,7 +1557,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        /* If we don't need to check status, don't. -KDU */
        if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
                /* fix the values for the next receiving if RDE is set */
-               if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
+               if (intr_status & EESR_RDE &&
+                   mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
                        u32 count = (sh_eth_read(ndev, RDFAR) -
                                     sh_eth_read(ndev, RDLAR)) >> 4;
 
@@ -1929,6 +1945,192 @@ error_exit:
        return ret;
 }
 
+/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
+ * version must be bumped as well.  Just adding registers up to that
+ * limit is fine, as long as the existing register indices don't
+ * change.
+ */
+#define SH_ETH_REG_DUMP_VERSION                1
+#define SH_ETH_REG_DUMP_MAX_REGS       256
+
+static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       struct sh_eth_cpu_data *cd = mdp->cd;
+       u32 *valid_map;
+       size_t len;
+
+       BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
+
+       /* Dump starts with a bitmap that tells ethtool which
+        * registers are defined for this chip.
+        */
+       len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
+       if (buf) {
+               valid_map = buf;
+               buf += len;
+       } else {
+               valid_map = NULL;
+       }
+
+       /* Add a register to the dump, if it has a defined offset.
+        * This automatically skips most undefined registers, but for
+        * some it is also necessary to check a capability flag in
+        * struct sh_eth_cpu_data.
+        */
+#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
+#define add_reg_from(reg, read_expr) do {                              \
+               if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {    \
+                       if (buf) {                                      \
+                               mark_reg_valid(reg);                    \
+                               *buf++ = read_expr;                     \
+                       }                                               \
+                       ++len;                                          \
+               }                                                       \
+       } while (0)
+#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
+#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
+
+       add_reg(EDSR);
+       add_reg(EDMR);
+       add_reg(EDTRR);
+       add_reg(EDRRR);
+       add_reg(EESR);
+       add_reg(EESIPR);
+       add_reg(TDLAR);
+       add_reg(TDFAR);
+       add_reg(TDFXR);
+       add_reg(TDFFR);
+       add_reg(RDLAR);
+       add_reg(RDFAR);
+       add_reg(RDFXR);
+       add_reg(RDFFR);
+       add_reg(TRSCER);
+       add_reg(RMFCR);
+       add_reg(TFTR);
+       add_reg(FDR);
+       add_reg(RMCR);
+       add_reg(TFUCR);
+       add_reg(RFOCR);
+       if (cd->rmiimode)
+               add_reg(RMIIMODE);
+       add_reg(FCFTR);
+       if (cd->rpadir)
+               add_reg(RPADIR);
+       if (!cd->no_trimd)
+               add_reg(TRIMD);
+       add_reg(ECMR);
+       add_reg(ECSR);
+       add_reg(ECSIPR);
+       add_reg(PIR);
+       if (!cd->no_psr)
+               add_reg(PSR);
+       add_reg(RDMLR);
+       add_reg(RFLR);
+       add_reg(IPGR);
+       if (cd->apr)
+               add_reg(APR);
+       if (cd->mpr)
+               add_reg(MPR);
+       add_reg(RFCR);
+       add_reg(RFCF);
+       if (cd->tpauser)
+               add_reg(TPAUSER);
+       add_reg(TPAUSECR);
+       add_reg(GECMR);
+       if (cd->bculr)
+               add_reg(BCULR);
+       add_reg(MAHR);
+       add_reg(MALR);
+       add_reg(TROCR);
+       add_reg(CDCR);
+       add_reg(LCCR);
+       add_reg(CNDCR);
+       add_reg(CEFCR);
+       add_reg(FRECR);
+       add_reg(TSFRCR);
+       add_reg(TLFRCR);
+       add_reg(CERCR);
+       add_reg(CEECR);
+       add_reg(MAFCR);
+       if (cd->rtrate)
+               add_reg(RTRATE);
+       if (cd->hw_crc)
+               add_reg(CSMR);
+       if (cd->select_mii)
+               add_reg(RMII_MII);
+       add_reg(ARSTR);
+       if (cd->tsu) {
+               add_tsu_reg(TSU_CTRST);
+               add_tsu_reg(TSU_FWEN0);
+               add_tsu_reg(TSU_FWEN1);
+               add_tsu_reg(TSU_FCM);
+               add_tsu_reg(TSU_BSYSL0);
+               add_tsu_reg(TSU_BSYSL1);
+               add_tsu_reg(TSU_PRISL0);
+               add_tsu_reg(TSU_PRISL1);
+               add_tsu_reg(TSU_FWSL0);
+               add_tsu_reg(TSU_FWSL1);
+               add_tsu_reg(TSU_FWSLC);
+               add_tsu_reg(TSU_QTAG0);
+               add_tsu_reg(TSU_QTAG1);
+               add_tsu_reg(TSU_QTAGM0);
+               add_tsu_reg(TSU_QTAGM1);
+               add_tsu_reg(TSU_FWSR);
+               add_tsu_reg(TSU_FWINMK);
+               add_tsu_reg(TSU_ADQT0);
+               add_tsu_reg(TSU_ADQT1);
+               add_tsu_reg(TSU_VTAG0);
+               add_tsu_reg(TSU_VTAG1);
+               add_tsu_reg(TSU_ADSBSY);
+               add_tsu_reg(TSU_TEN);
+               add_tsu_reg(TSU_POST1);
+               add_tsu_reg(TSU_POST2);
+               add_tsu_reg(TSU_POST3);
+               add_tsu_reg(TSU_POST4);
+               if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
+                       /* This is the start of a table, not just a single
+                        * register.
+                        */
+                       if (buf) {
+                               unsigned int i;
+
+                               mark_reg_valid(TSU_ADRH0);
+                               for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
+                                       *buf++ = ioread32(
+                                               mdp->tsu_addr +
+                                               mdp->reg_offset[TSU_ADRH0] +
+                                               i * 4);
+                       }
+                       len += SH_ETH_TSU_CAM_ENTRIES * 2;
+               }
+       }
+
+#undef mark_reg_valid
+#undef add_reg_from
+#undef add_reg
+#undef add_tsu_reg
+
+       return len * 4;
+}
+
+static int sh_eth_get_regs_len(struct net_device *ndev)
+{
+       return __sh_eth_get_regs(ndev, NULL);
+}
+
+static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
+                           void *buf)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       regs->version = SH_ETH_REG_DUMP_VERSION;
+
+       pm_runtime_get_sync(&mdp->pdev->dev);
+       __sh_eth_get_regs(ndev, buf);
+       pm_runtime_put_sync(&mdp->pdev->dev);
+}
+
 static int sh_eth_nway_reset(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2074,6 +2276,8 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 static const struct ethtool_ops sh_eth_ethtool_ops = {
        .get_settings   = sh_eth_get_settings,
        .set_settings   = sh_eth_set_settings,
+       .get_regs_len   = sh_eth_get_regs_len,
+       .get_regs       = sh_eth_get_regs,
        .nway_reset     = sh_eth_nway_reset,
        .get_msglevel   = sh_eth_get_msglevel,
        .set_msglevel   = sh_eth_set_msglevel,
@@ -2213,6 +2417,22 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        return NETDEV_TX_OK;
 }
 
+/* The statistics registers have write-clear behaviour, which means we
+ * will lose any increment between the read and write.  We mitigate
+ * this by only clearing when we read a non-zero value, so we will
+ * never falsely report a total of zero.
+ */
+static void
+sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
+{
+       u32 delta = sh_eth_read(ndev, reg);
+
+       if (delta) {
+               *stat += delta;
+               sh_eth_write(ndev, 0, reg);
+       }
+}
+
 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2223,21 +2443,18 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
        if (!mdp->is_opened)
                return &ndev->stats;
 
-       ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
-       sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
-       ndev->stats.collisions += sh_eth_read(ndev, CDCR);
-       sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
-       ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
-       sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
+       sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
+       sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
+       sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
 
        if (sh_eth_is_gether(mdp)) {
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
-               sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
-               sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
+               sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+                                  CERCR);
+               sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+                                  CEECR);
        } else {
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
-               sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
+               sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+                                  CNDCR);
        }
 
        return &ndev->stats;
index 259d03f353e109709abfbaac4a447d9f4af82026..06dbbe5201cbc915cf28401307cdaa5f0d2dc91f 100644 (file)
 #define SH_ETH_TSU_CAM_ENTRIES 32
 
 enum {
+       /* IMPORTANT: To keep ethtool register dump working, add new
+        * register names immediately before SH_ETH_MAX_REGISTER_OFFSET.
+        */
+
        /* E-DMAC registers */
        EDSR = 0,
        EDMR,
@@ -131,9 +135,7 @@ enum {
        TSU_POST3,
        TSU_POST4,
        TSU_ADRH0,
-       TSU_ADRL0,
-       TSU_ADRH31,
-       TSU_ADRL31,
+       /* TSU_ADR{H,L}{0..31} are assumed to be contiguous */
 
        TXNLCR0,
        TXALCR0,
@@ -491,6 +493,7 @@ struct sh_eth_cpu_data {
        unsigned select_mii:1;  /* EtherC have RMII_MII (MII select register) */
        unsigned shift_rd0:1;   /* shift Rx descriptor word 0 right by 16 */
        unsigned rmiimode:1;    /* EtherC has RMIIMODE register */
+       unsigned rtrate:1;      /* EtherC has RTRATE register */
 };
 
 struct sh_eth_private {
@@ -543,19 +546,29 @@ static inline void sh_eth_soft_swap(char *src, int len)
 #endif
 }
 
+#define SH_ETH_OFFSET_INVALID  ((u16) ~0)
+
 static inline void sh_eth_write(struct net_device *ndev, u32 data,
                                int enum_index)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
+       u16 offset = mdp->reg_offset[enum_index];
 
-       iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]);
+       if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+               return;
+
+       iowrite32(data, mdp->addr + offset);
 }
 
 static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
+       u16 offset = mdp->reg_offset[enum_index];
+
+       if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+               return ~0U;
 
-       return ioread32(mdp->addr + mdp->reg_offset[enum_index]);
+       return ioread32(mdp->addr + offset);
 }
 
 static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
index 5cecec282aba8b471b60b91af594ba6374edcda7..a87b177bd7234a2610b7183a0a221241575a35b1 100644 (file)
 #include <linux/if_vlan.h>
 #include <linux/if_bridge.h>
 #include <linux/bitops.h>
+#include <linux/ctype.h>
 #include <net/switchdev.h>
 #include <net/rtnetlink.h>
+#include <net/ip_fib.h>
+#include <net/netevent.h>
+#include <net/arp.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 #include <generated/utsrelease.h>
 
@@ -49,12 +53,12 @@ struct rocker_flow_tbl_key {
        enum rocker_of_dpa_table_id tbl_id;
        union {
                struct {
-                       u32 in_lport;
-                       u32 in_lport_mask;
+                       u32 in_pport;
+                       u32 in_pport_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                } ig_port;
                struct {
-                       u32 in_lport;
+                       u32 in_pport;
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
@@ -62,8 +66,8 @@ struct rocker_flow_tbl_key {
                        __be16 new_vlan_id;
                } vlan;
                struct {
-                       u32 in_lport;
-                       u32 in_lport_mask;
+                       u32 in_pport;
+                       u32 in_pport_mask;
                        __be16 eth_type;
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
@@ -91,8 +95,8 @@ struct rocker_flow_tbl_key {
                        bool copy_to_cpu;
                } bridge;
                struct {
-                       u32 in_lport;
-                       u32 in_lport_mask;
+                       u32 in_pport;
+                       u32 in_pport_mask;
                        u8 eth_src[ETH_ALEN];
                        u8 eth_src_mask[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
@@ -111,9 +115,10 @@ struct rocker_flow_tbl_key {
 
 struct rocker_flow_tbl_entry {
        struct hlist_node entry;
-       u32 ref_count;
+       u32 cmd;
        u64 cookie;
        struct rocker_flow_tbl_key key;
+       size_t key_len;
        u32 key_crc32; /* key */
 };
 
@@ -148,7 +153,7 @@ struct rocker_fdb_tbl_entry {
        u32 key_crc32; /* key */
        bool learned;
        struct rocker_fdb_tbl_key {
-               u32 lport;
+               u32 pport;
                u8 addr[ETH_ALEN];
                __be16 vlan_id;
        } key;
@@ -161,6 +166,16 @@ struct rocker_internal_vlan_tbl_entry {
        __be16 vlan_id;
 };
 
+struct rocker_neigh_tbl_entry {
+       struct hlist_node entry;
+       __be32 ip_addr; /* key */
+       struct net_device *dev;
+       u32 ref_count;
+       u32 index;
+       u8 eth_dst[ETH_ALEN];
+       bool ttl_check;
+};
+
 struct rocker_desc_info {
        char *data; /* mapped */
        size_t data_size;
@@ -200,7 +215,7 @@ struct rocker_port {
        struct net_device *bridge_dev;
        struct rocker *rocker;
        unsigned int port_number;
-       u32 lport;
+       u32 pport;
        __be16 internal_vlan_id;
        int stp_state;
        u32 brport_flags;
@@ -234,6 +249,9 @@ struct rocker {
        unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
        DECLARE_HASHTABLE(internal_vlan_tbl, 8);
        spinlock_t internal_vlan_tbl_lock;
+       DECLARE_HASHTABLE(neigh_tbl, 16);
+       spinlock_t neigh_tbl_lock;
+       u32 neigh_tbl_next_index;
 };
 
 static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
@@ -256,7 +274,6 @@ enum {
        ROCKER_PRIORITY_VLAN = 1,
        ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
        ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
-       ROCKER_PRIORITY_UNICAST_ROUTING = 1,
        ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
        ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
        ROCKER_PRIORITY_BRIDGING_VLAN = 3,
@@ -789,7 +806,30 @@ static u32 __pos_inc(u32 pos, size_t limit)
 
 static int rocker_desc_err(struct rocker_desc_info *desc_info)
 {
-       return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN);
+       int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
+
+       switch (err) {
+       case ROCKER_OK:
+               return 0;
+       case -ROCKER_ENOENT:
+               return -ENOENT;
+       case -ROCKER_ENXIO:
+               return -ENXIO;
+       case -ROCKER_ENOMEM:
+               return -ENOMEM;
+       case -ROCKER_EEXIST:
+               return -EEXIST;
+       case -ROCKER_EINVAL:
+               return -EINVAL;
+       case -ROCKER_EMSGSIZE:
+               return -EMSGSIZE;
+       case -ROCKER_ENOTSUP:
+               return -EOPNOTSUPP;
+       case -ROCKER_ENOBUFS:
+               return -ENOBUFS;
+       }
+
+       return -EINVAL;
 }
 
 static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
@@ -1257,9 +1297,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
        u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
 
        if (enable)
-               val |= 1ULL << rocker_port->lport;
+               val |= 1ULL << rocker_port->pport;
        else
-               val &= ~(1ULL << rocker_port->lport);
+               val &= ~(1ULL << rocker_port->pport);
        rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
 }
 
@@ -1312,11 +1352,11 @@ static int rocker_event_link_change(struct rocker *rocker,
        struct rocker_port *rocker_port;
 
        rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
-       if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] ||
+       if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
            !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
                return -EIO;
        port_number =
-               rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1;
+               rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
        link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
 
        if (port_number >= rocker->port_count)
@@ -1353,12 +1393,12 @@ static int rocker_event_mac_vlan_seen(struct rocker *rocker,
        __be16 vlan_id;
 
        rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
-       if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] ||
+       if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
            !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
            !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
                return -EIO;
        port_number =
-               rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1;
+               rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
        addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
        vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
 
@@ -1517,8 +1557,8 @@ rocker_cmd_get_port_settings_prep(struct rocker *rocker,
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
-                              rocker_port->lport))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+                              rocker_port->pport))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);
        return 0;
@@ -1591,6 +1631,53 @@ rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
        return 0;
 }
 
+struct port_name {
+       char *buf;
+       size_t len;
+};
+
+static int
+rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
+                                           struct rocker_port *rocker_port,
+                                           struct rocker_desc_info *desc_info,
+                                           void *priv)
+{
+       struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+       struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+       struct port_name *name = priv;
+       struct rocker_tlv *attr;
+       size_t i, j, len;
+       char *str;
+
+       rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+       if (!attrs[ROCKER_TLV_CMD_INFO])
+               return -EIO;
+
+       rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+                               attrs[ROCKER_TLV_CMD_INFO]);
+       attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
+       if (!attr)
+               return -EIO;
+
+       len = min_t(size_t, rocker_tlv_len(attr), name->len);
+       str = rocker_tlv_data(attr);
+
+       /* make sure name only contains alphanumeric characters */
+       for (i = j = 0; i < len; ++i) {
+               if (isalnum(str[i])) {
+                       name->buf[j] = str[i];
+                       j++;
+               }
+       }
+
+       if (j == 0)
+               return -EIO;
+
+       name->buf[j] = '\0';
+
+       return 0;
+}
+
 static int
 rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
                                          struct rocker_port *rocker_port,
@@ -1606,8 +1693,8 @@ rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
-                              rocker_port->lport))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+                              rocker_port->pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
                               ethtool_cmd_speed(ecmd)))
@@ -1637,8 +1724,8 @@ rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
-                              rocker_port->lport))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+                              rocker_port->pport))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
                           ETH_ALEN, macaddr))
@@ -1661,8 +1748,8 @@ rocker_cmd_set_port_learning_prep(struct rocker *rocker,
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
-                              rocker_port->lport))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+                              rocker_port->pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
                              !!(rocker_port->brport_flags & BR_LEARNING)))
@@ -1715,11 +1802,11 @@ static int rocker_port_set_learning(struct rocker_port *rocker_port)
 static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
                                           struct rocker_flow_tbl_entry *entry)
 {
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
-                              entry->key.ig_port.in_lport))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+                              entry->key.ig_port.in_pport))
                return -EMSGSIZE;
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
-                              entry->key.ig_port.in_lport_mask))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
+                              entry->key.ig_port.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.ig_port.goto_tbl))
@@ -1731,8 +1818,8 @@ static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
 static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
                                        struct rocker_flow_tbl_entry *entry)
 {
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
-                              entry->key.vlan.in_lport))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+                              entry->key.vlan.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.vlan.vlan_id))
@@ -1754,11 +1841,11 @@ static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
 static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
                                            struct rocker_flow_tbl_entry *entry)
 {
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
-                              entry->key.term_mac.in_lport))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+                              entry->key.term_mac.in_pport))
                return -EMSGSIZE;
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
-                              entry->key.term_mac.in_lport_mask))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
+                              entry->key.term_mac.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
                                entry->key.term_mac.eth_type))
@@ -1845,11 +1932,11 @@ static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
 static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
                                       struct rocker_flow_tbl_entry *entry)
 {
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
-                              entry->key.acl.in_lport))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+                              entry->key.acl.in_pport))
                return -EMSGSIZE;
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
-                              entry->key.acl.in_lport_mask))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
+                              entry->key.acl.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
                           ETH_ALEN, entry->key.acl.eth_src))
@@ -1917,8 +2004,7 @@ static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
        struct rocker_tlv *cmd_info;
        int err = 0;
 
-       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
-                              ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
@@ -1975,8 +2061,7 @@ static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
        const struct rocker_flow_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;
 
-       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
-                              ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
@@ -1993,7 +2078,7 @@ static int
 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
                                      struct rocker_group_tbl_entry *entry)
 {
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT,
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
                               ROCKER_GROUP_PORT_GET(entry->group_id)))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
@@ -2145,9 +2230,9 @@ static int rocker_cmd_group_tbl_del(struct rocker *rocker,
        return 0;
 }
 
-/*****************************************
- * Flow, group, FDB, internal VLAN tables
- *****************************************/
+/***************************************************
+ * Flow, group, FDB, internal VLAN and neigh tables
+ ***************************************************/
 
 static int rocker_init_tbls(struct rocker *rocker)
 {
@@ -2163,6 +2248,9 @@ static int rocker_init_tbls(struct rocker *rocker)
        hash_init(rocker->internal_vlan_tbl);
        spin_lock_init(&rocker->internal_vlan_tbl_lock);
 
+       hash_init(rocker->neigh_tbl);
+       spin_lock_init(&rocker->neigh_tbl_lock);
+
        return 0;
 }
 
@@ -2173,6 +2261,7 @@ static void rocker_free_tbls(struct rocker *rocker)
        struct rocker_group_tbl_entry *group_entry;
        struct rocker_fdb_tbl_entry *fdb_entry;
        struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
+       struct rocker_neigh_tbl_entry *neigh_entry;
        struct hlist_node *tmp;
        int bkt;
 
@@ -2196,16 +2285,22 @@ static void rocker_free_tbls(struct rocker *rocker)
                           tmp, internal_vlan_entry, entry)
                hash_del(&internal_vlan_entry->entry);
        spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
+
+       spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
+       hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
+               hash_del(&neigh_entry->entry);
+       spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
 }
 
 static struct rocker_flow_tbl_entry *
 rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
 {
        struct rocker_flow_tbl_entry *found;
+       size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
 
        hash_for_each_possible(rocker->flow_tbl, found,
                               entry, match->key_crc32) {
-               if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
+               if (memcmp(&found->key, &match->key, key_len) == 0)
                        return found;
        }
 
@@ -2218,42 +2313,34 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
 {
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
+       size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long flags;
-       bool add_to_hw = false;
-       int err = 0;
 
-       match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+       match->key_crc32 = crc32(~0, &match->key, key_len);
 
        spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
 
        found = rocker_flow_tbl_find(rocker, match);
 
        if (found) {
-               kfree(match);
+               match->cookie = found->cookie;
+               hash_del(&found->entry);
+               kfree(found);
+               found = match;
+               found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
        } else {
                found = match;
                found->cookie = rocker->flow_tbl_next_cookie++;
-               hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
-               add_to_hw = true;
+               found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
        }
 
-       found->ref_count++;
+       hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
 
        spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
 
-       if (add_to_hw) {
-               err = rocker_cmd_exec(rocker, rocker_port,
-                                     rocker_cmd_flow_tbl_add,
-                                     found, NULL, NULL, nowait);
-               if (err) {
-                       spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
-                       hash_del(&found->entry);
-                       spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
-                       kfree(found);
-               }
-       }
-
-       return err;
+       return rocker_cmd_exec(rocker, rocker_port,
+                              rocker_cmd_flow_tbl_add,
+                              found, NULL, NULL, nowait);
 }
 
 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
@@ -2262,29 +2349,26 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
 {
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
+       size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long flags;
-       bool del_from_hw = false;
        int err = 0;
 
-       match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+       match->key_crc32 = crc32(~0, &match->key, key_len);
 
        spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
 
        found = rocker_flow_tbl_find(rocker, match);
 
        if (found) {
-               found->ref_count--;
-               if (found->ref_count == 0) {
-                       hash_del(&found->entry);
-                       del_from_hw = true;
-               }
+               hash_del(&found->entry);
+               found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
        }
 
        spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
 
        kfree(match);
 
-       if (del_from_hw) {
+       if (found) {
                err = rocker_cmd_exec(rocker, rocker_port,
                                      rocker_cmd_flow_tbl_del,
                                      found, NULL, NULL, nowait);
@@ -2311,7 +2395,7 @@ static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
 }
 
 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
-                                  int flags, u32 in_lport, u32 in_lport_mask,
+                                  int flags, u32 in_pport, u32 in_pport_mask,
                                   enum rocker_of_dpa_table_id goto_tbl)
 {
        struct rocker_flow_tbl_entry *entry;
@@ -2322,15 +2406,15 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
 
        entry->key.priority = ROCKER_PRIORITY_IG_PORT;
        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
-       entry->key.ig_port.in_lport = in_lport;
-       entry->key.ig_port.in_lport_mask = in_lport_mask;
+       entry->key.ig_port.in_pport = in_pport;
+       entry->key.ig_port.in_pport_mask = in_pport_mask;
        entry->key.ig_port.goto_tbl = goto_tbl;
 
        return rocker_flow_tbl_do(rocker_port, flags, entry);
 }
 
 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
-                               int flags, u32 in_lport,
+                               int flags, u32 in_pport,
                                __be16 vlan_id, __be16 vlan_id_mask,
                                enum rocker_of_dpa_table_id goto_tbl,
                                bool untagged, __be16 new_vlan_id)
@@ -2343,7 +2427,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
 
        entry->key.priority = ROCKER_PRIORITY_VLAN;
        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
-       entry->key.vlan.in_lport = in_lport;
+       entry->key.vlan.in_pport = in_pport;
        entry->key.vlan.vlan_id = vlan_id;
        entry->key.vlan.vlan_id_mask = vlan_id_mask;
        entry->key.vlan.goto_tbl = goto_tbl;
@@ -2355,7 +2439,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
 }
 
 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
-                                   u32 in_lport, u32 in_lport_mask,
+                                   u32 in_pport, u32 in_pport_mask,
                                    __be16 eth_type, const u8 *eth_dst,
                                    const u8 *eth_dst_mask, __be16 vlan_id,
                                    __be16 vlan_id_mask, bool copy_to_cpu,
@@ -2378,8 +2462,8 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
        }
 
        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
-       entry->key.term_mac.in_lport = in_lport;
-       entry->key.term_mac.in_lport_mask = in_lport_mask;
+       entry->key.term_mac.in_pport = in_pport;
+       entry->key.term_mac.in_pport_mask = in_pport_mask;
        entry->key.term_mac.eth_type = eth_type;
        ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
        ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
@@ -2444,9 +2528,34 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
        return rocker_flow_tbl_do(rocker_port, flags, entry);
 }
 
+static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
+                                         __be16 eth_type, __be32 dst,
+                                         __be32 dst_mask, u32 priority,
+                                         enum rocker_of_dpa_table_id goto_tbl,
+                                         u32 group_id, int flags)
+{
+       struct rocker_flow_tbl_entry *entry;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
+       entry->key.priority = priority;
+       entry->key.ucast_routing.eth_type = eth_type;
+       entry->key.ucast_routing.dst4 = dst;
+       entry->key.ucast_routing.dst4_mask = dst_mask;
+       entry->key.ucast_routing.goto_tbl = goto_tbl;
+       entry->key.ucast_routing.group_id = group_id;
+       entry->key_len = offsetof(struct rocker_flow_tbl_key,
+                                 ucast_routing.group_id);
+
+       return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
-                              int flags, u32 in_lport,
-                              u32 in_lport_mask,
+                              int flags, u32 in_pport,
+                              u32 in_pport_mask,
                               const u8 *eth_src, const u8 *eth_src_mask,
                               const u8 *eth_dst, const u8 *eth_dst_mask,
                               __be16 eth_type,
@@ -2472,8 +2581,8 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
 
        entry->key.priority = priority;
        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
-       entry->key.acl.in_lport = in_lport;
-       entry->key.acl.in_lport_mask = in_lport_mask;
+       entry->key.acl.in_pport = in_pport;
+       entry->key.acl.in_pport_mask = in_pport_mask;
 
        if (eth_src)
                ether_addr_copy(entry->key.acl.eth_src, eth_src);
@@ -2531,7 +2640,6 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_group_tbl_entry *found;
        unsigned long flags;
-       int err = 0;
 
        spin_lock_irqsave(&rocker->group_tbl_lock, flags);
 
@@ -2551,12 +2659,9 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
 
        spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
 
-       if (found->cmd)
-               err = rocker_cmd_exec(rocker, rocker_port,
-                                     rocker_cmd_group_tbl_add,
-                                     found, NULL, NULL, nowait);
-
-       return err;
+       return rocker_cmd_exec(rocker, rocker_port,
+                              rocker_cmd_group_tbl_add,
+                              found, NULL, NULL, nowait);
 }
 
 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
@@ -2604,7 +2709,7 @@ static int rocker_group_tbl_do(struct rocker_port *rocker_port,
 
 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
                                     int flags, __be16 vlan_id,
-                                    u32 out_lport, int pop_vlan)
+                                    u32 out_pport, int pop_vlan)
 {
        struct rocker_group_tbl_entry *entry;
 
@@ -2612,7 +2717,7 @@ static int rocker_group_l2_interface(struct rocker_port *rocker_port,
        if (!entry)
                return -ENOMEM;
 
-       entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+       entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
        entry->l2_interface.pop_vlan = pop_vlan;
 
        return rocker_group_tbl_do(rocker_port, flags, entry);
@@ -2652,17 +2757,262 @@ static int rocker_group_l2_flood(struct rocker_port *rocker_port,
                                       group_id);
 }
 
+static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
+                                  int flags, u32 index, u8 *src_mac,
+                                  u8 *dst_mac, __be16 vlan_id,
+                                  bool ttl_check, u32 pport)
+{
+       struct rocker_group_tbl_entry *entry;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
+       if (src_mac)
+               ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
+       if (dst_mac)
+               ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
+       entry->l3_unicast.vlan_id = vlan_id;
+       entry->l3_unicast.ttl_check = ttl_check;
+       entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
+
+       return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static struct rocker_neigh_tbl_entry *
+       rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
+{
+       struct rocker_neigh_tbl_entry *found;
+
+       hash_for_each_possible(rocker->neigh_tbl, found,
+                              entry, be32_to_cpu(ip_addr))
+               if (found->ip_addr == ip_addr)
+                       return found;
+
+       return NULL;
+}
+
+static void _rocker_neigh_add(struct rocker *rocker,
+                             struct rocker_neigh_tbl_entry *entry)
+{
+       entry->index = rocker->neigh_tbl_next_index++;
+       entry->ref_count++;
+       hash_add(rocker->neigh_tbl, &entry->entry,
+                be32_to_cpu(entry->ip_addr));
+}
+
+static void _rocker_neigh_del(struct rocker *rocker,
+                             struct rocker_neigh_tbl_entry *entry)
+{
+       if (--entry->ref_count == 0) {
+               hash_del(&entry->entry);
+               kfree(entry);
+       }
+}
+
+static void _rocker_neigh_update(struct rocker *rocker,
+                                struct rocker_neigh_tbl_entry *entry,
+                                u8 *eth_dst, bool ttl_check)
+{
+       if (eth_dst) {
+               ether_addr_copy(entry->eth_dst, eth_dst);
+               entry->ttl_check = ttl_check;
+       } else {
+               entry->ref_count++;
+       }
+}
+
+static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
+                                 int flags, __be32 ip_addr, u8 *eth_dst)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_neigh_tbl_entry *entry;
+       struct rocker_neigh_tbl_entry *found;
+       unsigned long lock_flags;
+       __be16 eth_type = htons(ETH_P_IP);
+       enum rocker_of_dpa_table_id goto_tbl =
+               ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+       u32 group_id;
+       u32 priority = 0;
+       bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+       bool updating;
+       bool removing;
+       int err = 0;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
+
+       found = rocker_neigh_tbl_find(rocker, ip_addr);
+
+       updating = found && adding;
+       removing = found && !adding;
+       adding = !found && adding;
+
+       if (adding) {
+               entry->ip_addr = ip_addr;
+               entry->dev = rocker_port->dev;
+               ether_addr_copy(entry->eth_dst, eth_dst);
+               entry->ttl_check = true;
+               _rocker_neigh_add(rocker, entry);
+       } else if (removing) {
+               memcpy(entry, found, sizeof(*entry));
+               _rocker_neigh_del(rocker, found);
+       } else if (updating) {
+               _rocker_neigh_update(rocker, found, eth_dst, true);
+               memcpy(entry, found, sizeof(*entry));
+       } else {
+               err = -ENOENT;
+       }
+
+       spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
+
+       if (err)
+               goto err_out;
+
+       /* For each active neighbor, we have an L3 unicast group and
+        * a /32 route to the neighbor, which uses the L3 unicast
+        * group.  The L3 unicast group can also be referred to by
+        * other routes' nexthops.
+        */
+
+       err = rocker_group_l3_unicast(rocker_port, flags,
+                                     entry->index,
+                                     rocker_port->dev->dev_addr,
+                                     entry->eth_dst,
+                                     rocker_port->internal_vlan_id,
+                                     entry->ttl_check,
+                                     rocker_port->pport);
+       if (err) {
+               netdev_err(rocker_port->dev,
+                          "Error (%d) L3 unicast group index %d\n",
+                          err, entry->index);
+               goto err_out;
+       }
+
+       if (adding || removing) {
+               group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
+               err = rocker_flow_tbl_ucast4_routing(rocker_port,
+                                                    eth_type, ip_addr,
+                                                    inet_make_mask(32),
+                                                    priority, goto_tbl,
+                                                    group_id, flags);
+
+               if (err)
+                       netdev_err(rocker_port->dev,
+                                  "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
+                                  err, &entry->ip_addr, group_id);
+       }
+
+err_out:
+       if (!adding)
+               kfree(entry);
+
+       return err;
+}
+
+static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
+                                   __be32 ip_addr)
+{
+       struct net_device *dev = rocker_port->dev;
+       struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
+       int err = 0;
+
+       if (!n)
+               n = neigh_create(&arp_tbl, &ip_addr, dev);
+       if (!n)
+               return -ENOMEM;
+
+       /* If the neigh is already resolved, then go ahead and
+        * install the entry, otherwise start the ARP process to
+        * resolve the neigh.
+        */
+
+       if (n->nud_state & NUD_VALID)
+               err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
+       else
+               neigh_event_send(n, NULL);
+
+       return err;
+}
+
+static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
+                              __be32 ip_addr, u32 *index)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_neigh_tbl_entry *entry;
+       struct rocker_neigh_tbl_entry *found;
+       unsigned long lock_flags;
+       bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+       bool updating;
+       bool removing;
+       bool resolved = true;
+       int err = 0;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
+
+       found = rocker_neigh_tbl_find(rocker, ip_addr);
+       if (found)
+               *index = found->index;
+
+       updating = found && adding;
+       removing = found && !adding;
+       adding = !found && adding;
+
+       if (adding) {
+               entry->ip_addr = ip_addr;
+               entry->dev = rocker_port->dev;
+               _rocker_neigh_add(rocker, entry);
+               *index = entry->index;
+               resolved = false;
+       } else if (removing) {
+               _rocker_neigh_del(rocker, found);
+       } else if (updating) {
+               _rocker_neigh_update(rocker, found, NULL, false);
+               resolved = !is_zero_ether_addr(found->eth_dst);
+       } else {
+               err = -ENOENT;
+       }
+
+       spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
+
+       if (!adding)
+               kfree(entry);
+
+       if (err)
+               return err;
+
+       /* Resolved means neigh ip_addr is resolved to neigh mac. */
+
+       if (!resolved)
+               err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
+
+       return err;
+}
+
 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
                                        int flags, __be16 vlan_id)
 {
        struct rocker_port *p;
        struct rocker *rocker = rocker_port->rocker;
        u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
-       u32 group_ids[rocker->port_count];
+       u32 *group_ids;
        u8 group_count = 0;
-       int err;
+       int err = 0;
        int i;
 
+       group_ids = kcalloc(rocker->port_count, sizeof(u32),
+                           rocker_op_flags_gfp(flags));
+       if (!group_ids)
+               return -ENOMEM;
+
        /* Adjust the flood group for this VLAN.  The flood group
         * references an L2 interface group for each port in this
         * VLAN.
@@ -2674,14 +3024,13 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
                        continue;
                if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
                        group_ids[group_count++] =
-                               ROCKER_GROUP_L2_INTERFACE(vlan_id,
-                                                         p->lport);
+                               ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
                }
        }
 
        /* If there are no bridged ports in this VLAN, we're done */
        if (group_count == 0)
-               return 0;
+               goto no_ports_in_vlan;
 
        err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
                                    group_count, group_ids,
@@ -2690,6 +3039,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
                netdev_err(rocker_port->dev,
                           "Error (%d) port VLAN l2 flood group\n", err);
 
+no_ports_in_vlan:
+       kfree(group_ids);
        return err;
 }
 
@@ -2700,7 +3051,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_port *p;
        bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
-       u32 out_lport;
+       u32 out_pport;
        int ref = 0;
        int err;
        int i;
@@ -2711,14 +3062,14 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
 
        if (rocker_port->stp_state == BR_STATE_LEARNING ||
            rocker_port->stp_state == BR_STATE_FORWARDING) {
-               out_lport = rocker_port->lport;
+               out_pport = rocker_port->pport;
                err = rocker_group_l2_interface(rocker_port, flags,
-                                               vlan_id, out_lport,
+                                               vlan_id, out_pport,
                                                pop_vlan);
                if (err) {
                        netdev_err(rocker_port->dev,
-                                  "Error (%d) port VLAN l2 group for lport %d\n",
-                                  err, out_lport);
+                                  "Error (%d) port VLAN l2 group for pport %d\n",
+                                  err, out_pport);
                        return err;
                }
        }
@@ -2737,9 +3088,9 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
        if ((!adding || ref != 1) && (adding || ref != 0))
                return 0;
 
-       out_lport = 0;
+       out_pport = 0;
        err = rocker_group_l2_interface(rocker_port, flags,
-                                       vlan_id, out_lport,
+                                       vlan_id, out_pport,
                                        pop_vlan);
        if (err) {
                netdev_err(rocker_port->dev,
@@ -2799,9 +3150,9 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
                                     int flags, struct rocker_ctrl *ctrl,
                                     __be16 vlan_id)
 {
-       u32 in_lport = rocker_port->lport;
-       u32 in_lport_mask = 0xffffffff;
-       u32 out_lport = 0;
+       u32 in_pport = rocker_port->pport;
+       u32 in_pport_mask = 0xffffffff;
+       u32 out_pport = 0;
        u8 *eth_src = NULL;
        u8 *eth_src_mask = NULL;
        __be16 vlan_id_mask = htons(0xffff);
@@ -2809,11 +3160,11 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
        u8 ip_proto_mask = 0;
        u8 ip_tos = 0;
        u8 ip_tos_mask = 0;
-       u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+       u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
        int err;
 
        err = rocker_flow_tbl_acl(rocker_port, flags,
-                                 in_lport, in_lport_mask,
+                                 in_pport, in_pport_mask,
                                  eth_src, eth_src_mask,
                                  ctrl->eth_dst, ctrl->eth_dst_mask,
                                  ctrl->eth_type,
@@ -2856,7 +3207,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
                                      int flags, struct rocker_ctrl *ctrl,
                                      __be16 vlan_id)
 {
-       u32 in_lport_mask = 0xffffffff;
+       u32 in_pport_mask = 0xffffffff;
        __be16 vlan_id_mask = htons(0xffff);
        int err;
 
@@ -2864,7 +3215,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
                vlan_id = rocker_port->internal_vlan_id;
 
        err = rocker_flow_tbl_term_mac(rocker_port,
-                                      rocker_port->lport, in_lport_mask,
+                                      rocker_port->pport, in_pport_mask,
                                       ctrl->eth_type, ctrl->eth_dst,
                                       ctrl->eth_dst_mask, vlan_id,
                                       vlan_id_mask, ctrl->copy_to_cpu,
@@ -2934,7 +3285,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
 {
        enum rocker_of_dpa_table_id goto_tbl =
                ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
-       u32 in_lport = rocker_port->lport;
+       u32 in_pport = rocker_port->pport;
        __be16 vlan_id = htons(vid);
        __be16 vlan_id_mask = htons(0xffff);
        __be16 internal_vlan_id;
@@ -2978,7 +3329,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
        }
 
        err = rocker_flow_tbl_vlan(rocker_port, flags,
-                                  in_lport, vlan_id, vlan_id_mask,
+                                  in_pport, vlan_id, vlan_id_mask,
                                   goto_tbl, untagged, internal_vlan_id);
        if (err)
                netdev_err(rocker_port->dev,
@@ -2990,20 +3341,20 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
 static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
 {
        enum rocker_of_dpa_table_id goto_tbl;
-       u32 in_lport;
-       u32 in_lport_mask;
+       u32 in_pport;
+       u32 in_pport_mask;
        int err;
 
        /* Normal Ethernet Frames.  Matches pkts from any local physical
         * ports.  Goto VLAN tbl.
         */
 
-       in_lport = 0;
-       in_lport_mask = 0xffff0000;
+       in_pport = 0;
+       in_pport_mask = 0xffff0000;
        goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
 
        err = rocker_flow_tbl_ig_port(rocker_port, flags,
-                                     in_lport, in_lport_mask,
+                                     in_pport, in_pport_mask,
                                      goto_tbl);
        if (err)
                netdev_err(rocker_port->dev,
@@ -3047,7 +3398,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
        struct rocker_fdb_learn_work *lw;
        enum rocker_of_dpa_table_id goto_tbl =
                ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
-       u32 out_lport = rocker_port->lport;
+       u32 out_pport = rocker_port->pport;
        u32 tunnel_id = 0;
        u32 group_id = ROCKER_GROUP_NONE;
        bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
@@ -3055,7 +3406,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
        int err;
 
        if (rocker_port_is_bridged(rocker_port))
-               group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+               group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
 
        if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
                err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
@@ -3114,7 +3465,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
                return -ENOMEM;
 
        fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
-       fdb->key.lport = rocker_port->lport;
+       fdb->key.pport = rocker_port->pport;
        ether_addr_copy(fdb->key.addr, addr);
        fdb->key.vlan_id = vlan_id;
        fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
@@ -3161,7 +3512,7 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
        spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
 
        hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
-               if (found->key.lport != rocker_port->lport)
+               if (found->key.pport != rocker_port->pport)
                        continue;
                if (!found->learned)
                        continue;
@@ -3182,7 +3533,7 @@ err_out:
 static int rocker_port_router_mac(struct rocker_port *rocker_port,
                                  int flags, __be16 vlan_id)
 {
-       u32 in_lport_mask = 0xffffffff;
+       u32 in_pport_mask = 0xffffffff;
        __be16 eth_type;
        const u8 *dst_mac_mask = ff_mac;
        __be16 vlan_id_mask = htons(0xffff);
@@ -3194,7 +3545,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
 
        eth_type = htons(ETH_P_IP);
        err = rocker_flow_tbl_term_mac(rocker_port,
-                                      rocker_port->lport, in_lport_mask,
+                                      rocker_port->pport, in_pport_mask,
                                       eth_type, rocker_port->dev->dev_addr,
                                       dst_mac_mask, vlan_id, vlan_id_mask,
                                       copy_to_cpu, flags);
@@ -3203,7 +3554,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
 
        eth_type = htons(ETH_P_IPV6);
        err = rocker_flow_tbl_term_mac(rocker_port,
-                                      rocker_port->lport, in_lport_mask,
+                                      rocker_port->pport, in_pport_mask,
                                       eth_type, rocker_port->dev->dev_addr,
                                       dst_mac_mask, vlan_id, vlan_id_mask,
                                       copy_to_cpu, flags);
@@ -3214,7 +3565,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
 static int rocker_port_fwding(struct rocker_port *rocker_port)
 {
        bool pop_vlan;
-       u32 out_lport;
+       u32 out_pport;
        __be16 vlan_id;
        u16 vid;
        int flags = ROCKER_OP_FLAG_NOWAIT;
@@ -3231,19 +3582,19 @@ static int rocker_port_fwding(struct rocker_port *rocker_port)
            rocker_port->stp_state != BR_STATE_FORWARDING)
                flags |= ROCKER_OP_FLAG_REMOVE;
 
-       out_lport = rocker_port->lport;
+       out_pport = rocker_port->pport;
        for (vid = 1; vid < VLAN_N_VID; vid++) {
                if (!test_bit(vid, rocker_port->vlan_bitmap))
                        continue;
                vlan_id = htons(vid);
                pop_vlan = rocker_vlan_id_is_internal(vlan_id);
                err = rocker_group_l2_interface(rocker_port, flags,
-                                               vlan_id, out_lport,
+                                               vlan_id, out_pport,
                                                pop_vlan);
                if (err) {
                        netdev_err(rocker_port->dev,
-                                  "Error (%d) port VLAN l2 group for lport %d\n",
-                                  err, out_lport);
+                                  "Error (%d) port VLAN l2 group for pport %d\n",
+                                  err, out_pport);
                        return err;
                }
        }
@@ -3302,6 +3653,26 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
        return rocker_port_fwding(rocker_port);
 }
 
+static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
+{
+       if (rocker_port_is_bridged(rocker_port))
+               /* bridge STP will enable port */
+               return 0;
+
+       /* port is not bridged, so simulate going to FORWARDING state */
+       return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
+}
+
+static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
+{
+       if (rocker_port_is_bridged(rocker_port))
+               /* bridge STP will disable port */
+               return 0;
+
+       /* port is not bridged, so simulate going to DISABLED state */
+       return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+}
+
 static struct rocker_internal_vlan_tbl_entry *
 rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
 {
@@ -3387,6 +3758,51 @@ not_found:
        spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
 }
 
+static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
+                               int dst_len, struct fib_info *fi, u32 tb_id,
+                               int flags)
+{
+       struct fib_nh *nh;
+       __be16 eth_type = htons(ETH_P_IP);
+       __be32 dst_mask = inet_make_mask(dst_len);
+       __be16 internal_vlan_id = rocker_port->internal_vlan_id;
+       u32 priority = fi->fib_priority;
+       enum rocker_of_dpa_table_id goto_tbl =
+               ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+       u32 group_id;
+       bool nh_on_port;
+       bool has_gw;
+       u32 index;
+       int err;
+
+       /* XXX support ECMP */
+
+       nh = fi->fib_nh;
+       nh_on_port = (fi->fib_dev == rocker_port->dev);
+       has_gw = !!nh->nh_gw;
+
+       if (has_gw && nh_on_port) {
+               err = rocker_port_ipv4_nh(rocker_port, flags,
+                                         nh->nh_gw, &index);
+               if (err)
+                       return err;
+
+               group_id = ROCKER_GROUP_L3_UNICAST(index);
+       } else {
+               /* Send to CPU for processing */
+               group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
+       }
+
+       err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
+                                            dst_mask, priority, goto_tbl,
+                                            group_id, flags);
+       if (err)
+               netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
+                          err, &dst);
+
+       return err;
+}
+
 /*****************
  * Net device ops
  *****************/
@@ -3394,8 +3810,6 @@ not_found:
 static int rocker_port_open(struct net_device *dev)
 {
        struct rocker_port *rocker_port = netdev_priv(dev);
-       u8 stp_state = rocker_port_is_bridged(rocker_port) ?
-               BR_STATE_BLOCKING : BR_STATE_FORWARDING;
        int err;
 
        err = rocker_port_dma_rings_init(rocker_port);
@@ -3418,9 +3832,9 @@ static int rocker_port_open(struct net_device *dev)
                goto err_request_rx_irq;
        }
 
-       err = rocker_port_stp_update(rocker_port, stp_state);
+       err = rocker_port_fwd_enable(rocker_port);
        if (err)
-               goto err_stp_update;
+               goto err_fwd_enable;
 
        napi_enable(&rocker_port->napi_tx);
        napi_enable(&rocker_port->napi_rx);
@@ -3428,7 +3842,7 @@ static int rocker_port_open(struct net_device *dev)
        netif_start_queue(dev);
        return 0;
 
-err_stp_update:
+err_fwd_enable:
        free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
 err_request_rx_irq:
        free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
@@ -3445,7 +3859,7 @@ static int rocker_port_stop(struct net_device *dev)
        rocker_port_set_enable(rocker_port, false);
        napi_disable(&rocker_port->napi_rx);
        napi_disable(&rocker_port->napi_tx);
-       rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+       rocker_port_fwd_disable(rocker_port);
        free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
        free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
        rocker_port_dma_rings_fini(rocker_port);
@@ -3702,7 +4116,7 @@ static int rocker_port_fdb_dump(struct sk_buff *skb,
 
        spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
        hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
-               if (found->key.lport != rocker_port->lport)
+               if (found->key.pport != rocker_port->pport)
                        continue;
                if (idx < cb->args[0])
                        goto skip;
@@ -3772,22 +4186,19 @@ static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                       rocker_port->brport_flags, mask);
 }
 
-static int rocker_port_switch_parent_id_get(struct net_device *dev,
-                                           struct netdev_phys_item_id *psid)
+static int rocker_port_get_phys_port_name(struct net_device *dev,
+                                         char *buf, size_t len)
 {
        struct rocker_port *rocker_port = netdev_priv(dev);
-       struct rocker *rocker = rocker_port->rocker;
-
-       psid->id_len = sizeof(rocker->hw.id);
-       memcpy(&psid->id, &rocker->hw.id, psid->id_len);
-       return 0;
-}
+       struct port_name name = { .buf = buf, .len = len };
+       int err;
 
-static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
-{
-       struct rocker_port *rocker_port = netdev_priv(dev);
+       err = rocker_cmd_exec(rocker_port->rocker, rocker_port,
+                             rocker_cmd_get_port_settings_prep, NULL,
+                             rocker_cmd_get_port_settings_phys_name_proc,
+                             &name, false);
 
-       return rocker_port_stp_update(rocker_port, state);
+       return err ? -EOPNOTSUPP : 0;
 }
 
 static const struct net_device_ops rocker_port_netdev_ops = {
@@ -3802,8 +4213,61 @@ static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_fdb_dump                   = rocker_port_fdb_dump,
        .ndo_bridge_setlink             = rocker_port_bridge_setlink,
        .ndo_bridge_getlink             = rocker_port_bridge_getlink,
-       .ndo_switch_parent_id_get       = rocker_port_switch_parent_id_get,
-       .ndo_switch_port_stp_update     = rocker_port_switch_port_stp_update,
+       .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
+};
+
+/********************
+ * swdev interface
+ ********************/
+
+static int rocker_port_swdev_parent_id_get(struct net_device *dev,
+                                          struct netdev_phys_item_id *psid)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       struct rocker *rocker = rocker_port->rocker;
+
+       psid->id_len = sizeof(rocker->hw.id);
+       memcpy(&psid->id, &rocker->hw.id, psid->id_len);
+       return 0;
+}
+
+static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+
+       return rocker_port_stp_update(rocker_port, state);
+}
+
+static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev,
+                                         __be32 dst, int dst_len,
+                                         struct fib_info *fi,
+                                         u8 tos, u8 type,
+                                         u32 nlflags, u32 tb_id)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int flags = 0;
+
+       return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
+                                   fi, tb_id, flags);
+}
+
+static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev,
+                                         __be32 dst, int dst_len,
+                                         struct fib_info *fi,
+                                         u8 tos, u8 type, u32 tb_id)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int flags = ROCKER_OP_FLAG_REMOVE;
+
+       return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
+                                   fi, tb_id, flags);
+}
+
+static const struct swdev_ops rocker_port_swdev_ops = {
+       .swdev_parent_id_get            = rocker_port_swdev_parent_id_get,
+       .swdev_port_stp_update          = rocker_port_swdev_port_stp_update,
+       .swdev_fib_ipv4_add             = rocker_port_swdev_fib_ipv4_add,
+       .swdev_fib_ipv4_del             = rocker_port_swdev_fib_ipv4_del,
 };
 
 /********************
@@ -3882,8 +4346,8 @@ rocker_cmd_get_port_stats_prep(struct rocker *rocker,
        if (!cmd_stats)
                return -EMSGSIZE;
 
-       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_LPORT,
-                              rocker_port->lport))
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
+                              rocker_port->pport))
                return -EMSGSIZE;
 
        rocker_tlv_nest_end(desc_info, cmd_stats);
@@ -3900,7 +4364,7 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
        struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
        struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
        struct rocker_tlv *pattr;
-       u32 lport;
+       u32 pport;
        u64 *data = priv;
        int i;
 
@@ -3912,11 +4376,11 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
        rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
                                attrs[ROCKER_TLV_CMD_INFO]);
 
-       if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT])
+       if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
                return -EIO;
 
-       lport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]);
-       if (lport != rocker_port->lport)
+       pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
+       if (pport != rocker_port->pport)
                return -EIO;
 
        for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
@@ -4104,7 +4568,7 @@ static void rocker_carrier_init(struct rocker_port *rocker_port)
        u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
        bool link_up;
 
-       link_up = link_status & (1 << rocker_port->lport);
+       link_up = link_status & (1 << rocker_port->pport);
        if (link_up)
                netif_carrier_on(rocker_port->dev);
        else
@@ -4152,20 +4616,22 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
        rocker_port->dev = dev;
        rocker_port->rocker = rocker;
        rocker_port->port_number = port_number;
-       rocker_port->lport = port_number + 1;
+       rocker_port->pport = port_number + 1;
        rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
 
        rocker_port_dev_addr_init(rocker, rocker_port);
        dev->netdev_ops = &rocker_port_netdev_ops;
        dev->ethtool_ops = &rocker_port_ethtool_ops;
+       dev->swdev_ops = &rocker_port_swdev_ops;
        netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
                       NAPI_POLL_WEIGHT);
        netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
                       NAPI_POLL_WEIGHT);
        rocker_carrier_init(rocker_port);
 
-       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
-                               NETIF_F_HW_SWITCH_OFFLOAD;
+       dev->features |= NETIF_F_NETNS_LOCAL |
+                        NETIF_F_HW_VLAN_CTAG_FILTER |
+                        NETIF_F_HW_SWITCH_OFFLOAD;
 
        err = register_netdev(dev);
        if (err) {
@@ -4436,9 +4902,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port,
        rocker_port->internal_vlan_id =
                rocker_port_internal_vlan_id_get(rocker_port,
                                                 bridge->ifindex);
-       err = rocker_port_vlan(rocker_port, 0, 0);
-
-       return err;
+       return rocker_port_vlan(rocker_port, 0, 0);
 }
 
 static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
@@ -4458,6 +4922,11 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
                rocker_port_internal_vlan_id_get(rocker_port,
                                                 rocker_port->dev->ifindex);
        err = rocker_port_vlan(rocker_port, 0, 0);
+       if (err)
+               return err;
+
+       if (rocker_port->dev->flags & IFF_UP)
+               err = rocker_port_fwd_enable(rocker_port);
 
        return err;
 }
@@ -4509,6 +4978,48 @@ static struct notifier_block rocker_netdevice_nb __read_mostly = {
        .notifier_call = rocker_netdevice_event,
 };
 
+/************************************
+ * Net event notifier event handler
+ ************************************/
+
+static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
+       __be32 ip_addr = *(__be32 *)n->primary_key;
+
+       return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
+}
+
+static int rocker_netevent_event(struct notifier_block *unused,
+                                unsigned long event, void *ptr)
+{
+       struct net_device *dev;
+       struct neighbour *n = ptr;
+       int err;
+
+       switch (event) {
+       case NETEVENT_NEIGH_UPDATE:
+               if (n->tbl != &arp_tbl)
+                       return NOTIFY_DONE;
+               dev = n->dev;
+               if (!rocker_port_dev_check(dev))
+                       return NOTIFY_DONE;
+               err = rocker_neigh_update(dev, n);
+               if (err)
+                       netdev_warn(dev,
+                                   "failed to handle neigh update (err %d)\n",
+                                   err);
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_netevent_nb __read_mostly = {
+       .notifier_call = rocker_netevent_event,
+};
+
 /***********************
  * Module init and exit
  ***********************/
@@ -4518,18 +5029,21 @@ static int __init rocker_module_init(void)
        int err;
 
        register_netdevice_notifier(&rocker_netdevice_nb);
+       register_netevent_notifier(&rocker_netevent_nb);
        err = pci_register_driver(&rocker_pci_driver);
        if (err)
                goto err_pci_register_driver;
        return 0;
 
 err_pci_register_driver:
+       unregister_netdevice_notifier(&rocker_netevent_nb);
        unregister_netdevice_notifier(&rocker_netdevice_nb);
        return err;
 }
 
 static void __exit rocker_module_exit(void)
 {
+       unregister_netevent_notifier(&rocker_netevent_nb);
        unregister_netdevice_notifier(&rocker_netdevice_nb);
        pci_unregister_driver(&rocker_pci_driver);
 }
index a5bc432feada1986eb41a1f3fa935c9fa38d5e4d..a4e9591d7457f3a5cb52b9b8c9201a4c77530907 100644 (file)
 
 #include <linux/types.h>
 
+/* Return codes */
+enum {
+       ROCKER_OK = 0,
+       ROCKER_ENOENT = 2,
+       ROCKER_ENXIO = 6,
+       ROCKER_ENOMEM = 12,
+       ROCKER_EEXIST = 17,
+       ROCKER_EINVAL = 22,
+       ROCKER_EMSGSIZE = 90,
+       ROCKER_ENOTSUP = 95,
+       ROCKER_ENOBUFS = 105,
+};
+
+#define ROCKER_FP_PORTS_MAX 62
+
 #define PCI_VENDOR_ID_REDHAT           0x1b36
 #define PCI_DEVICE_ID_REDHAT_ROCKER    0x0006
 
@@ -136,13 +151,14 @@ enum {
 
 enum {
        ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC,
-       ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,             /* u32 */
+       ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,             /* u32 */
        ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,             /* u32 */
        ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,            /* u8 */
        ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,           /* u8 */
        ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,           /* binary */
        ROCKER_TLV_CMD_PORT_SETTINGS_MODE,              /* u8 */
        ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,          /* u8 */
+       ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME,         /* binary */
 
        __ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
        ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
@@ -151,7 +167,7 @@ enum {
 
 enum {
        ROCKER_TLV_CMD_PORT_STATS_UNSPEC,
-       ROCKER_TLV_CMD_PORT_STATS_LPORT,            /* u32 */
+       ROCKER_TLV_CMD_PORT_STATS_PPORT,            /* u32 */
 
        ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,          /* u64 */
        ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,         /* u64 */
@@ -191,7 +207,7 @@ enum {
 
 enum {
        ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC,
-       ROCKER_TLV_EVENT_LINK_CHANGED_LPORT,    /* u32 */
+       ROCKER_TLV_EVENT_LINK_CHANGED_PPORT,    /* u32 */
        ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP,   /* u8 */
 
        __ROCKER_TLV_EVENT_LINK_CHANGED_MAX,
@@ -201,7 +217,7 @@ enum {
 
 enum {
        ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC,
-       ROCKER_TLV_EVENT_MAC_VLAN_LPORT,        /* u32 */
+       ROCKER_TLV_EVENT_MAC_VLAN_PPORT,        /* u32 */
        ROCKER_TLV_EVENT_MAC_VLAN_MAC,          /* binary */
        ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID,      /* __be16 */
 
@@ -275,9 +291,9 @@ enum {
        ROCKER_TLV_OF_DPA_HARDTIME,             /* u32 */
        ROCKER_TLV_OF_DPA_IDLETIME,             /* u32 */
        ROCKER_TLV_OF_DPA_COOKIE,               /* u64 */
-       ROCKER_TLV_OF_DPA_IN_LPORT,             /* u32 */
-       ROCKER_TLV_OF_DPA_IN_LPORT_MASK,        /* u32 */
-       ROCKER_TLV_OF_DPA_OUT_LPORT,            /* u32 */
+       ROCKER_TLV_OF_DPA_IN_PPORT,             /* u32 */
+       ROCKER_TLV_OF_DPA_IN_PPORT_MASK,        /* u32 */
+       ROCKER_TLV_OF_DPA_OUT_PPORT,            /* u32 */
        ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,        /* u16 */
        ROCKER_TLV_OF_DPA_GROUP_ID,             /* u32 */
        ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,       /* u32 */
@@ -291,7 +307,7 @@ enum {
        ROCKER_TLV_OF_DPA_NEW_VLAN_ID,          /* __be16 */
        ROCKER_TLV_OF_DPA_NEW_VLAN_PCP,         /* u8 */
        ROCKER_TLV_OF_DPA_TUNNEL_ID,            /* u32 */
-       ROCKER_TLV_OF_DPA_TUN_LOG_LPORT,        /* u32 */
+       ROCKER_TLV_OF_DPA_TUNNEL_LPORT,         /* u32 */
        ROCKER_TLV_OF_DPA_ETHERTYPE,            /* __be16 */
        ROCKER_TLV_OF_DPA_DST_MAC,              /* binary */
        ROCKER_TLV_OF_DPA_DST_MAC_MASK,         /* binary */
index c8a01ee4d25e339ba22fff4ee991b3691170093d..413ea14ab91f7471f2e0f001478846cdfd07c605 100644 (file)
@@ -422,11 +422,11 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
        /* assign queue number */
        tx_ring->queue_no = queue_no;
 
-       /* initalise counters */
+       /* initialise counters */
        tx_ring->dirty_tx = 0;
        tx_ring->cur_tx = 0;
 
-       /* initalise TX queue lock */
+       /* initialise TX queue lock */
        spin_lock_init(&tx_ring->tx_lock);
 
        return 0;
@@ -515,7 +515,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
                        goto err_free_rx_buffers;
        }
 
-       /* initalise counters */
+       /* initialise counters */
        rx_ring->cur_rx = 0;
        rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
        priv->dma_buf_sz = bfsize;
@@ -837,7 +837,7 @@ static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
        /* free the skbuffs of the ring */
        tx_free_ring_skbufs(tx_ring);
 
-       /* initalise counters */
+       /* initialise counters */
        tx_ring->cur_tx = 0;
        tx_ring->dirty_tx = 0;
 
@@ -1176,7 +1176,7 @@ static int sxgbe_open(struct net_device *dev)
        if (priv->phydev)
                phy_start(priv->phydev);
 
-       /* initalise TX coalesce parameters */
+       /* initialise TX coalesce parameters */
        sxgbe_tx_init_coalesce(priv);
 
        if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
@@ -1721,7 +1721,7 @@ static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
  *  Description:
  *  This function is a driver entry point whenever ifconfig command gets
  *  executed to see device statistics. Statistics are number of
- *  bytes sent or received, errors occured etc.
+ *  bytes sent or received, errors occurred etc.
  *  Return value:
  *  This function returns various statistical information of device.
  */
index 238482495e81fa3111554fc1d98fd0209e1499a8..33d2f9aa1b53262b39ad191236eef8d79a988ca1 100644 (file)
@@ -3215,7 +3215,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
        return status;
 }
 
-/* Fake a successfull reset, which will be performed later in efx_io_resume. */
+/* Fake a successful reset, which will be performed later in efx_io_resume. */
 static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
 {
        struct efx_nic *efx = pci_get_drvdata(pdev);
index 75975328e0206ce94ca091b3b22201f43dfe02f4..bb89e96a125eab7bf0e5d8569f6119adb720dbb5 100644 (file)
@@ -645,7 +645,7 @@ static bool efx_check_tx_flush_complete(struct efx_nic *efx)
 }
 
 /* Flush all the transmit queues, and continue flushing receive queues until
- * they're all flushed. Wait for the DRAIN events to be recieved so that there
+ * they're all flushed. Wait for the DRAIN events to be received so that there
  * are no more RX and TX events left on any channel. */
 static int efx_farch_do_flush(struct efx_nic *efx)
 {
@@ -1108,7 +1108,7 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 }
 
 /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
- * was succesful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
+ * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
  * the RX queue back to the mask of RX queues in need of flushing.
  */
 static void
index a707fb5ef14c752b0b48b01b9e228d2235e6c238..e028de10e1b743d2e9adf6334d0ad3cbdb176001 100644 (file)
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
index 6b861e3de4b0d0655879e5bb5740855d6b9c251d..a2e9aee05cdde8dc5af6528101862868abe00075 100644 (file)
@@ -323,9 +323,9 @@ struct efx_ptp_data {
 
 static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
 static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
-static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts);
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
 static int efx_phc_settime(struct ptp_clock_info *ptp,
-                          const struct timespec *e_ts);
+                          const struct timespec64 *e_ts);
 static int efx_phc_enable(struct ptp_clock_info *ptp,
                          struct ptp_clock_request *request, int on);
 
@@ -1198,8 +1198,8 @@ static const struct ptp_clock_info efx_phc_clock_info = {
        .pps            = 1,
        .adjfreq        = efx_phc_adjfreq,
        .adjtime        = efx_phc_adjtime,
-       .gettime        = efx_phc_gettime,
-       .settime        = efx_phc_settime,
+       .gettime64      = efx_phc_gettime,
+       .settime64      = efx_phc_settime,
        .enable         = efx_phc_enable,
 };
 
@@ -1837,7 +1837,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
                            NULL, 0, NULL);
 }
 
-static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        struct efx_ptp_data *ptp_data = container_of(ptp,
                                                     struct efx_ptp_data,
@@ -1859,28 +1859,28 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
        kt = ptp_data->nic_to_kernel_time(
                MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR),
                MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0);
-       *ts = ktime_to_timespec(kt);
+       *ts = ktime_to_timespec64(kt);
        return 0;
 }
 
 static int efx_phc_settime(struct ptp_clock_info *ptp,
-                          const struct timespec *e_ts)
+                          const struct timespec64 *e_ts)
 {
        /* Get the current NIC time, efx_phc_gettime.
         * Subtract from the desired time to get the offset
         * call efx_phc_adjtime with the offset
         */
        int rc;
-       struct timespec time_now;
-       struct timespec delta;
+       struct timespec64 time_now;
+       struct timespec64 delta;
 
        rc = efx_phc_gettime(ptp, &time_now);
        if (rc != 0)
                return rc;
 
-       delta = timespec_sub(*e_ts, time_now);
+       delta = timespec64_sub(*e_ts, time_now);
 
-       rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta));
+       rc = efx_phc_adjtime(ptp, timespec64_to_ns(&delta));
        if (rc != 0)
                return rc;
 
index a8bbbad68a88e6e3c2c9b7480bdb88582b8abf41..fe83430796fd04f3aa502de8f324f107380a680f 100644 (file)
@@ -1067,7 +1067,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
 }
 
 /* Copy the list of individual addresses into the vfdi_status.peers
- * array and auxillary pages, protected by %local_lock. Drop that lock
+ * array and auxiliary pages, protected by %local_lock. Drop that lock
  * and then broadcast the address list to every VF.
  */
 static void efx_siena_sriov_peer_work(struct work_struct *data)
index ae044f44936a2fd49bf9ff2728dd6e7fe7700baf..f62901d4cae0e033d9b4e9849cf0d54c378b5be0 100644 (file)
@@ -98,7 +98,7 @@ struct vfdi_endpoint {
  * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
  * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
  *     finalize the SRAM entries.
- * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targetting the given RXQ.
+ * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
  * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
  * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
  *     from PF and write the initial status.
@@ -148,7 +148,7 @@ enum vfdi_op {
  * @u.init_txq.flags: Checksum offload flags.
  * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
  *     address of each page backing the transmit queue.
- * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targetting
+ * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
  *     all traffic at this receive queue.
  * @u.mac_filter.flags: MAC filter flags.
  * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
index 8678e39aba08cfe0d5b3b0578348b258812ec4ec..14b363a25c023c70f13b73e9c485bf28e9d533e2 100644 (file)
@@ -2204,27 +2204,17 @@ static int try_toggle_control_gpio(struct device *dev,
                                   int value, unsigned int nsdelay)
 {
        struct gpio_desc *gpio = *desc;
-       int res;
-
-       gpio = devm_gpiod_get_index(dev, name, index);
-       if (IS_ERR(gpio)) {
-               if (PTR_ERR(gpio) == -ENOENT) {
-                       *desc = NULL;
-                       return 0;
-               }
+       enum gpiod_flags flags = value ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH;
 
+       gpio = devm_gpiod_get_index_optional(dev, name, index, flags);
+       if (IS_ERR(gpio))
                return PTR_ERR(gpio);
+
+       if (gpio) {
+               if (nsdelay)
+                       usleep_range(nsdelay, 2 * nsdelay);
+               gpiod_set_value_cansleep(gpio, value);
        }
-       res = gpiod_direction_output(gpio, !value);
-       if (res) {
-               dev_err(dev, "unable to toggle gpio %s: %i\n", name, res);
-               devm_gpiod_put(dev, gpio);
-               gpio = NULL;
-               return res;
-       }
-       if (nsdelay)
-               usleep_range(nsdelay, 2 * nsdelay);
-       gpiod_set_value_cansleep(gpio, value);
        *desc = gpio;
 
        return 0;
index 2965c6ae7d6e4692ff491185550548bf985cf681..41047c9143d0a66cde1441311fb5feb3ce0796d0 100644 (file)
@@ -843,7 +843,7 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev)
        unsigned long flags;
 
        /* Initialise tx packet using broadcast destination address */
-       memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN);
+       eth_broadcast_addr(pdata->loopback_tx_pkt);
 
        /* Use incrementing source address */
        for (i = 6; i < 12; i++)
index e97074cd5800a7c67466b60925cb6dfaf6aaab0b..5a36bd2c7837d3f4c84e9344ba6bc040e872af2c 100644 (file)
@@ -91,7 +91,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
                                                  STMMAC_RESOURCE_NAME);
        if (IS_ERR(dwmac->stmmac_rst)) {
                dev_info(dev, "Could not get reset control!\n");
-               return -EINVAL;
+               if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               dwmac->stmmac_rst = NULL;
        }
 
        dwmac->interface = of_get_phy_mode(np);
index a0ea84fe6519badffb8b5cabf0e9892d135ea081..5336594abed1c373259b67f1a6cb8ef1866fc92d 100644 (file)
@@ -609,7 +609,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
                 * where, freq_div_ratio = clk_ptp_ref_i/50MHz
                 * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
                 * NOTE: clk_ptp_ref_i should be >= 50MHz to
-                *       achive 20ns accuracy.
+                *       achieve 20ns accuracy.
                 *
                 * 2^x * y == (y << x), hence
                 * 2^32 * 50000000 ==> (50000000 << 32)
index c5ee79d8a8c56478f9987efc2c0631c140aafc0d..170a18b61281d5ab406e96ce3231878f08fbad28 100644 (file)
@@ -105,13 +105,12 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
  * Description: this function will read the current time from the
  * hardware clock and store it in @ts.
  */
-static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
+static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        struct stmmac_priv *priv =
            container_of(ptp, struct stmmac_priv, ptp_clock_ops);
        unsigned long flags;
        u64 ns;
-       u32 reminder;
 
        spin_lock_irqsave(&priv->ptp_lock, flags);
 
@@ -119,8 +118,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
 
        spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &reminder);
-       ts->tv_nsec = reminder;
+       *ts = ns_to_timespec64(ns);
 
        return 0;
 }
@@ -135,7 +133,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
  * hardware clock.
  */
 static int stmmac_set_time(struct ptp_clock_info *ptp,
-                          const struct timespec *ts)
+                          const struct timespec64 *ts)
 {
        struct stmmac_priv *priv =
            container_of(ptp, struct stmmac_priv, ptp_clock_ops);
@@ -168,8 +166,8 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
        .pps = 0,
        .adjfreq = stmmac_adjust_freq,
        .adjtime = stmmac_adjust_time,
-       .gettime = stmmac_get_time,
-       .settime = stmmac_set_time,
+       .gettime64 = stmmac_get_time,
+       .settime64 = stmmac_set_time,
        .enable = stmmac_enable,
 };
 
index fef5dec2cffe9c3bb7f09bbe728ab2dc54b0cba9..74e9b148378c1b1e1551976f1bfe4aa7c7920abe 100644 (file)
@@ -2175,7 +2175,7 @@ static int gem_do_start(struct net_device *dev)
        }
 
        /* Mark us as attached again if we come from resume(), this has
-        * no effect if we weren't detatched and needs to be done now.
+        * no effect if we weren't detached and needs to be done now.
         */
        netif_device_attach(dev);
 
@@ -2794,7 +2794,7 @@ static void gem_remove_one(struct pci_dev *pdev)
 
                unregister_netdev(dev);
 
-               /* Ensure reset task is truely gone */
+               /* Ensure reset task is truly gone */
                cancel_work_sync(&gp->reset_task);
 
                /* Free resources */
index 22e0cad1b4b5a21e0edc9215781c1052a9c6ef84..401abf7254d33bfb0e81e12dc1eeaf4abbf32b51 100644 (file)
@@ -1411,6 +1411,8 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(err < 0)) {
                netdev_info(dev, "TX trigger error %d\n", err);
                d->hdr.state = VIO_DESC_FREE;
+               skb = port->tx_bufs[txi].skb;
+               port->tx_bufs[txi].skb = NULL;
                dev->stats.tx_carrier_errors++;
                goto out_dropped;
        }
index f6a71092e1359ea095aad0aeca093065080ceb5c..631e0afd07d2a2dcfa5b7238d988fb05b12b4850 100644 (file)
@@ -88,6 +88,7 @@ config TI_CPTS
 config TI_KEYSTONE_NETCP
        tristate "TI Keystone NETCP Core Support"
        select TI_CPSW_ALE
+       select TI_DAVINCI_MDIO
        depends on OF
        depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
        ---help---
index a1bbaf6352ba379d209c7fc5cac33c8bfcbcb347..b536b4c82752a233e7c18d4f5194041f6dc80e20 100644 (file)
@@ -726,7 +726,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
                if (ndev_status && (status >= 0)) {
                        /* The packet received is for the interface which
                         * is already down and the other interface is up
-                        * and running, intead of freeing which results
+                        * and running, instead of freeing which results
                         * in reducing of the number of rx descriptor in
                         * DMA engine, requeue skb back to cpdma.
                         */
index fbe42cb107ecadde04bc5d6d07a709020075e7bb..85a55b4ff8c0731a1952068e968715b256afdb91 100644 (file)
@@ -167,10 +167,9 @@ static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return 0;
 }
 
-static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        u64 ns;
-       u32 remainder;
        unsigned long flags;
        struct cpts *cpts = container_of(ptp, struct cpts, info);
 
@@ -178,21 +177,19 @@ static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
        ns = timecounter_read(&cpts->tc);
        spin_unlock_irqrestore(&cpts->lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
 
        return 0;
 }
 
 static int cpts_ptp_settime(struct ptp_clock_info *ptp,
-                           const struct timespec *ts)
+                           const struct timespec64 *ts)
 {
        u64 ns;
        unsigned long flags;
        struct cpts *cpts = container_of(ptp, struct cpts, info);
 
-       ns = ts->tv_sec * 1000000000ULL;
-       ns += ts->tv_nsec;
+       ns = timespec64_to_ns(ts);
 
        spin_lock_irqsave(&cpts->lock, flags);
        timecounter_init(&cpts->tc, &cpts->cc, ns);
@@ -216,20 +213,20 @@ static struct ptp_clock_info cpts_info = {
        .pps            = 0,
        .adjfreq        = cpts_ptp_adjfreq,
        .adjtime        = cpts_ptp_adjtime,
-       .gettime        = cpts_ptp_gettime,
-       .settime        = cpts_ptp_settime,
+       .gettime64      = cpts_ptp_gettime,
+       .settime64      = cpts_ptp_settime,
        .enable         = cpts_ptp_enable,
 };
 
 static void cpts_overflow_check(struct work_struct *work)
 {
-       struct timespec ts;
+       struct timespec64 ts;
        struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
 
        cpts_write32(cpts, CPTS_EN, control);
        cpts_write32(cpts, TS_PEND_EN, int_enable);
        cpts_ptp_gettime(&cpts->info, &ts);
-       pr_debug("cpts overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+       pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
        schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
 }
 
index 906e9bc412f5a56a35174faeff4d7cd12443ee4c..bbacf5cccec2fcbc3831e8f61b559d51a5211816 100644 (file)
@@ -41,7 +41,10 @@ struct netcp_tx_pipe {
        struct netcp_device     *netcp_device;
        void                    *dma_queue;
        unsigned int            dma_queue_id;
-       u8                      dma_psflags;
+       /* To port for packet forwarded to switch. Used only by ethss */
+       u8                      switch_to_port;
+#define        SWITCH_TO_PORT_IN_TAGINFO       BIT(0)
+       u8                      flags;
        void                    *dma_channel;
        const char              *dma_chan_name;
 };
index a31a8c3c8e7c74c91651a934eac853625805d2ff..43efc3a0cda58b26cededf117180f6782fbca76a 100644 (file)
@@ -1098,9 +1098,9 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
        struct netcp_tx_pipe *tx_pipe = NULL;
        struct netcp_hook_list *tx_hook;
        struct netcp_packet p_info;
-       u32 packet_info = 0;
        unsigned int dma_sz;
        dma_addr_t dma;
+       u32 tmp = 0;
        int ret = 0;
 
        p_info.netcp = netcp;
@@ -1140,20 +1140,27 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
                memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
                        p_info.psdata_len);
                set_words(psdata, p_info.psdata_len, psdata);
-               packet_info |=
-                       (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
+               tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
                        KNAV_DMA_DESC_PSLEN_SHIFT;
        }
 
-       packet_info |= KNAV_DMA_DESC_HAS_EPIB |
+       tmp |= KNAV_DMA_DESC_HAS_EPIB |
                ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
-               KNAV_DMA_DESC_RETQ_SHIFT) |
-               ((tx_pipe->dma_psflags & KNAV_DMA_DESC_PSFLAG_MASK) <<
-               KNAV_DMA_DESC_PSFLAG_SHIFT);
+               KNAV_DMA_DESC_RETQ_SHIFT);
 
-       set_words(&packet_info, 1, &desc->packet_info);
+       if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
+               tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
+                       KNAV_DMA_DESC_PSFLAG_SHIFT);
+       }
+
+       set_words(&tmp, 1, &desc->packet_info);
        set_words((u32 *)&skb, 1, &desc->pad[0]);
 
+       if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
+               tmp = tx_pipe->switch_to_port;
+               set_words((u32 *)&tmp, 1, &desc->tag_info);
+       }
+
        /* submit packet descriptor */
        ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
                                 &dma_sz);
@@ -1320,7 +1327,7 @@ static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
        if (addr)
                ether_addr_copy(naddr->addr, addr);
        else
-               memset(naddr->addr, 0, ETH_ALEN);
+               eth_zero_addr(naddr->addr);
        list_add_tail(&naddr->node, &netcp->addr_list);
 
        return naddr;
@@ -2127,7 +2134,7 @@ static int netcp_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id of_match[] = {
+static const struct of_device_id of_match[] = {
        { .compatible = "ti,netcp-1.0", },
        {},
 };
index 84f5ce525750d2c44ea80194975c92602f5793b5..2bef655279f32a4ffb6097b295362e2da22867c0 100644 (file)
 #define GBE_MODULE_NAME                        "netcp-gbe"
 #define GBE_SS_VERSION_14              0x4ed21104
 
+#define GBE_SS_REG_INDEX               0
+#define GBE_SGMII34_REG_INDEX          1
+#define GBE_SM_REG_INDEX               2
+/* offset relative to base of GBE_SS_REG_INDEX */
 #define GBE13_SGMII_MODULE_OFFSET      0x100
-#define GBE13_SGMII34_MODULE_OFFSET    0x400
-#define GBE13_SWITCH_MODULE_OFFSET     0x800
-#define GBE13_HOST_PORT_OFFSET         0x834
-#define GBE13_SLAVE_PORT_OFFSET                0x860
-#define GBE13_EMAC_OFFSET              0x900
-#define GBE13_SLAVE_PORT2_OFFSET       0xa00
-#define GBE13_HW_STATS_OFFSET          0xb00
-#define GBE13_ALE_OFFSET               0xe00
+/* offset relative to base of GBE_SM_REG_INDEX */
+#define GBE13_HOST_PORT_OFFSET         0x34
+#define GBE13_SLAVE_PORT_OFFSET                0x60
+#define GBE13_EMAC_OFFSET              0x100
+#define GBE13_SLAVE_PORT2_OFFSET       0x200
+#define GBE13_HW_STATS_OFFSET          0x300
+#define GBE13_ALE_OFFSET               0x600
 #define GBE13_HOST_PORT_NUM            0
-#define GBE13_NUM_SLAVES               4
-#define GBE13_NUM_ALE_PORTS            (GBE13_NUM_SLAVES + 1)
 #define GBE13_NUM_ALE_ENTRIES          1024
 
+/* 1G Ethernet NU SS defines */
+#define GBENU_MODULE_NAME              "netcp-gbenu"
+#define GBE_SS_ID_NU                   0x4ee6
+#define GBE_SS_ID_2U                   0x4ee8
+
+#define IS_SS_ID_MU(d) \
+       ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
+        (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
+
+#define IS_SS_ID_NU(d) \
+       (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
+
+#define GBENU_SS_REG_INDEX             0
+#define GBENU_SM_REG_INDEX             1
+#define GBENU_SGMII_MODULE_OFFSET      0x100
+#define GBENU_HOST_PORT_OFFSET         0x1000
+#define GBENU_SLAVE_PORT_OFFSET                0x2000
+#define GBENU_EMAC_OFFSET              0x2330
+#define GBENU_HW_STATS_OFFSET          0x1a000
+#define GBENU_ALE_OFFSET               0x1e000
+#define GBENU_HOST_PORT_NUM            0
+#define GBENU_NUM_ALE_ENTRIES          1024
+
 /* 10G Ethernet SS defines */
 #define XGBE_MODULE_NAME               "netcp-xgbe"
 #define XGBE_SS_VERSION_10             0x4ee42100
 
-#define XGBE_SERDES_REG_INDEX          1
+#define XGBE_SS_REG_INDEX              0
+#define XGBE_SM_REG_INDEX              1
+#define XGBE_SERDES_REG_INDEX          2
+
+/* offset relative to base of XGBE_SS_REG_INDEX */
 #define XGBE10_SGMII_MODULE_OFFSET     0x100
-#define XGBE10_SWITCH_MODULE_OFFSET    0x1000
-#define XGBE10_HOST_PORT_OFFSET                0x1034
-#define XGBE10_SLAVE_PORT_OFFSET       0x1064
-#define XGBE10_EMAC_OFFSET             0x1400
-#define XGBE10_ALE_OFFSET              0x1700
-#define XGBE10_HW_STATS_OFFSET         0x1800
+/* offset relative to base of XGBE_SM_REG_INDEX */
+#define XGBE10_HOST_PORT_OFFSET                0x34
+#define XGBE10_SLAVE_PORT_OFFSET       0x64
+#define XGBE10_EMAC_OFFSET             0x400
+#define XGBE10_ALE_OFFSET              0x700
+#define XGBE10_HW_STATS_OFFSET         0x800
 #define XGBE10_HOST_PORT_NUM           0
-#define XGBE10_NUM_SLAVES              2
-#define XGBE10_NUM_ALE_PORTS           (XGBE10_NUM_SLAVES + 1)
 #define XGBE10_NUM_ALE_ENTRIES         1024
 
 #define        GBE_TIMER_INTERVAL                      (HZ / 2)
 #define MACSL_FULLDUPLEX                       BIT(0)
 
 #define GBE_CTL_P0_ENABLE                      BIT(2)
-#define GBE_REG_VAL_STAT_ENABLE_ALL            0xff
+#define GBE13_REG_VAL_STAT_ENABLE_ALL          0xff
 #define XGBE_REG_VAL_STAT_ENABLE_ALL           0xf
 #define GBE_STATS_CD_SEL                       BIT(28)
 
 #define GBE_STATSC_MODULE                      2
 #define GBE_STATSD_MODULE                      3
 
+#define GBENU_STATS0_MODULE                    0
+#define GBENU_STATS1_MODULE                    1
+#define GBENU_STATS2_MODULE                    2
+#define GBENU_STATS3_MODULE                    3
+#define GBENU_STATS4_MODULE                    4
+#define GBENU_STATS5_MODULE                    5
+#define GBENU_STATS6_MODULE                    6
+#define GBENU_STATS7_MODULE                    7
+#define GBENU_STATS8_MODULE                    8
+
 #define XGBE_STATS0_MODULE                     0
 #define XGBE_STATS1_MODULE                     1
 #define XGBE_STATS2_MODULE                     2
 
-#define MAX_SLAVES                             GBE13_NUM_SLAVES
 /* s: 0-based slave_port */
 #define SGMII_BASE(s) \
        (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
 
 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
                offsetof(struct gbe##_##rb, rn)
+#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+               offsetof(struct gbenu##_##rb, rn)
 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
                offsetof(struct xgbe##_##rb, rn)
 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
 
+#define HOST_TX_PRI_MAP_DEFAULT                        0x00000000
+
 struct xgbe_ss_regs {
        u32     id_ver;
        u32     synce_count;
@@ -258,6 +297,192 @@ struct xgbe_hw_stats {
 
 #define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
 
+struct gbenu_ss_regs {
+       u32     id_ver;
+       u32     synce_count;            /* NU */
+       u32     synce_mux;              /* NU */
+       u32     control;                /* 2U */
+       u32     __rsvd_0[2];            /* 2U */
+       u32     rgmii_status;           /* 2U */
+       u32     ss_status;              /* 2U */
+};
+
+struct gbenu_switch_regs {
+       u32     id_ver;
+       u32     control;
+       u32     __rsvd_0[2];
+       u32     emcontrol;
+       u32     stat_port_en;
+       u32     ptype;                  /* NU */
+       u32     soft_idle;
+       u32     thru_rate;              /* NU */
+       u32     gap_thresh;             /* NU */
+       u32     tx_start_wds;           /* NU */
+       u32     eee_prescale;           /* 2U */
+       u32     tx_g_oflow_thresh_set;  /* NU */
+       u32     tx_g_oflow_thresh_clr;  /* NU */
+       u32     tx_g_buf_thresh_set_l;  /* NU */
+       u32     tx_g_buf_thresh_set_h;  /* NU */
+       u32     tx_g_buf_thresh_clr_l;  /* NU */
+       u32     tx_g_buf_thresh_clr_h;  /* NU */
+};
+
+struct gbenu_port_regs {
+       u32     __rsvd_0;
+       u32     control;
+       u32     max_blks;               /* 2U */
+       u32     mem_align1;
+       u32     blk_cnt;
+       u32     port_vlan;
+       u32     tx_pri_map;             /* NU */
+       u32     pri_ctl;                /* 2U */
+       u32     rx_pri_map;
+       u32     rx_maxlen;
+       u32     tx_blks_pri;            /* NU */
+       u32     __rsvd_1;
+       u32     idle2lpi;               /* 2U */
+       u32     lpi2idle;               /* 2U */
+       u32     eee_status;             /* 2U */
+       u32     __rsvd_2;
+       u32     __rsvd_3[176];          /* NU: more to add */
+       u32     __rsvd_4[2];
+       u32     sa_lo;
+       u32     sa_hi;
+       u32     ts_ctl;
+       u32     ts_seq_ltype;
+       u32     ts_vlan;
+       u32     ts_ctl_ltype2;
+       u32     ts_ctl2;
+};
+
+struct gbenu_host_port_regs {
+       u32     __rsvd_0;
+       u32     control;
+       u32     flow_id_offset;         /* 2U */
+       u32     __rsvd_1;
+       u32     blk_cnt;
+       u32     port_vlan;
+       u32     tx_pri_map;             /* NU */
+       u32     pri_ctl;
+       u32     rx_pri_map;
+       u32     rx_maxlen;
+       u32     tx_blks_pri;            /* NU */
+       u32     __rsvd_2;
+       u32     idle2lpi;               /* 2U */
+       u32     lpi2wake;               /* 2U */
+       u32     eee_status;             /* 2U */
+       u32     __rsvd_3;
+       u32     __rsvd_4[184];          /* NU */
+       u32     host_blks_pri;          /* NU */
+};
+
+struct gbenu_emac_regs {
+       u32     mac_control;
+       u32     mac_status;
+       u32     soft_reset;
+       u32     boff_test;
+       u32     rx_pause;
+       u32     __rsvd_0[11];           /* NU */
+       u32     tx_pause;
+       u32     __rsvd_1[11];           /* NU */
+       u32     em_control;
+       u32     tx_gap;
+};
+
+/* Some hw stat regs are applicable to the slave ports only.
+ * This is handled by the gbenu_et_stats table.  Also some
+ * are for SS version NU and some are for 2U.
+ */
+struct gbenu_hw_stats {
+       u32     rx_good_frames;
+       u32     rx_broadcast_frames;
+       u32     rx_multicast_frames;
+       u32     rx_pause_frames;                /* slave */
+       u32     rx_crc_errors;
+       u32     rx_align_code_errors;           /* slave */
+       u32     rx_oversized_frames;
+       u32     rx_jabber_frames;               /* slave */
+       u32     rx_undersized_frames;
+       u32     rx_fragments;                   /* slave */
+       u32     ale_drop;
+       u32     ale_overrun_drop;
+       u32     rx_bytes;
+       u32     tx_good_frames;
+       u32     tx_broadcast_frames;
+       u32     tx_multicast_frames;
+       u32     tx_pause_frames;                /* slave */
+       u32     tx_deferred_frames;             /* slave */
+       u32     tx_collision_frames;            /* slave */
+       u32     tx_single_coll_frames;          /* slave */
+       u32     tx_mult_coll_frames;            /* slave */
+       u32     tx_excessive_collisions;        /* slave */
+       u32     tx_late_collisions;             /* slave */
+       u32     rx_ipg_error;                   /* slave 10G only */
+       u32     tx_carrier_sense_errors;        /* slave */
+       u32     tx_bytes;
+       u32     tx_64B_frames;
+       u32     tx_65_to_127B_frames;
+       u32     tx_128_to_255B_frames;
+       u32     tx_256_to_511B_frames;
+       u32     tx_512_to_1023B_frames;
+       u32     tx_1024B_frames;
+       u32     net_bytes;
+       u32     rx_bottom_fifo_drop;
+       u32     rx_port_mask_drop;
+       u32     rx_top_fifo_drop;
+       u32     ale_rate_limit_drop;
+       u32     ale_vid_ingress_drop;
+       u32     ale_da_eq_sa_drop;
+       u32     __rsvd_0[3];
+       u32     ale_unknown_ucast;
+       u32     ale_unknown_ucast_bytes;
+       u32     ale_unknown_mcast;
+       u32     ale_unknown_mcast_bytes;
+       u32     ale_unknown_bcast;
+       u32     ale_unknown_bcast_bytes;
+       u32     ale_pol_match;
+       u32     ale_pol_match_red;              /* NU */
+       u32     ale_pol_match_yellow;           /* NU */
+       u32     __rsvd_1[44];
+       u32     tx_mem_protect_err;
+       /* following NU only */
+       u32     tx_pri0;
+       u32     tx_pri1;
+       u32     tx_pri2;
+       u32     tx_pri3;
+       u32     tx_pri4;
+       u32     tx_pri5;
+       u32     tx_pri6;
+       u32     tx_pri7;
+       u32     tx_pri0_bcnt;
+       u32     tx_pri1_bcnt;
+       u32     tx_pri2_bcnt;
+       u32     tx_pri3_bcnt;
+       u32     tx_pri4_bcnt;
+       u32     tx_pri5_bcnt;
+       u32     tx_pri6_bcnt;
+       u32     tx_pri7_bcnt;
+       u32     tx_pri0_drop;
+       u32     tx_pri1_drop;
+       u32     tx_pri2_drop;
+       u32     tx_pri3_drop;
+       u32     tx_pri4_drop;
+       u32     tx_pri5_drop;
+       u32     tx_pri6_drop;
+       u32     tx_pri7_drop;
+       u32     tx_pri0_drop_bcnt;
+       u32     tx_pri1_drop_bcnt;
+       u32     tx_pri2_drop_bcnt;
+       u32     tx_pri3_drop_bcnt;
+       u32     tx_pri4_drop_bcnt;
+       u32     tx_pri5_drop_bcnt;
+       u32     tx_pri6_drop_bcnt;
+       u32     tx_pri7_drop_bcnt;
+};
+
+#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
+#define GBENU_HW_STATS_REG_MAP_SZ      0x200
+
 struct gbe_ss_regs {
        u32     id_ver;
        u32     synce_count;
@@ -316,6 +541,7 @@ struct gbe_port_regs_ofs {
        u16     ts_vlan;
        u16     ts_ctl_ltype2;
        u16     ts_ctl2;
+       u16     rx_maxlen;      /* 2U, NU */
 };
 
 struct gbe_host_port_regs {
@@ -390,9 +616,7 @@ struct gbe_hw_stats {
 };
 
 #define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
-#define GBE13_NUM_HW_STATS_MOD                 2
-#define XGBE10_NUM_HW_STATS_MOD                        3
-#define GBE_MAX_HW_STAT_MODS                   3
+#define GBE_MAX_HW_STAT_MODS                   9
 #define GBE_HW_STATS_REG_MAP_SZ                        0x100
 
 struct gbe_slave {
@@ -420,11 +644,14 @@ struct gbe_priv {
        u32                             ale_entries;
        u32                             ale_ports;
        bool                            enable_ale;
+       u8                              max_num_slaves;
+       u8                              max_num_ports; /* max_num_slaves + 1 */
        struct netcp_tx_pipe            tx_pipe;
 
        int                             host_port;
        u32                             rx_packet_max;
        u32                             ss_version;
+       u32                             stats_en_mask;
 
        void __iomem                    *ss_regs;
        void __iomem                    *switch_regs;
@@ -475,275 +702,778 @@ struct netcp_ethtool_stat {
        int offset;
 };
 
-#define GBE_STATSA_INFO(field)         "GBE_A:"#field, GBE_STATSA_MODULE,\
-                               FIELD_SIZEOF(struct gbe_hw_stats, field), \
-                               offsetof(struct gbe_hw_stats, field)
+#define GBE_STATSA_INFO(field)                                         \
+{                                                                      \
+       "GBE_A:"#field, GBE_STATSA_MODULE,                              \
+       FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
+       offsetof(struct gbe_hw_stats, field)                            \
+}
 
-#define GBE_STATSB_INFO(field)         "GBE_B:"#field, GBE_STATSB_MODULE,\
-                               FIELD_SIZEOF(struct gbe_hw_stats, field), \
-                               offsetof(struct gbe_hw_stats, field)
+#define GBE_STATSB_INFO(field)                                         \
+{                                                                      \
+       "GBE_B:"#field, GBE_STATSB_MODULE,                              \
+       FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
+       offsetof(struct gbe_hw_stats, field)                            \
+}
 
-#define GBE_STATSC_INFO(field)         "GBE_C:"#field, GBE_STATSC_MODULE,\
-                               FIELD_SIZEOF(struct gbe_hw_stats, field), \
-                               offsetof(struct gbe_hw_stats, field)
+#define GBE_STATSC_INFO(field)                                         \
+{                                                                      \
+       "GBE_C:"#field, GBE_STATSC_MODULE,                              \
+       FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
+       offsetof(struct gbe_hw_stats, field)                            \
+}
 
-#define GBE_STATSD_INFO(field)         "GBE_D:"#field, GBE_STATSD_MODULE,\
-                               FIELD_SIZEOF(struct gbe_hw_stats, field), \
-                               offsetof(struct gbe_hw_stats, field)
+#define GBE_STATSD_INFO(field)                                         \
+{                                                                      \
+       "GBE_D:"#field, GBE_STATSD_MODULE,                              \
+       FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
+       offsetof(struct gbe_hw_stats, field)                            \
+}
 
 static const struct netcp_ethtool_stat gbe13_et_stats[] = {
        /* GBE module A */
-       {GBE_STATSA_INFO(rx_good_frames)},
-       {GBE_STATSA_INFO(rx_broadcast_frames)},
-       {GBE_STATSA_INFO(rx_multicast_frames)},
-       {GBE_STATSA_INFO(rx_pause_frames)},
-       {GBE_STATSA_INFO(rx_crc_errors)},
-       {GBE_STATSA_INFO(rx_align_code_errors)},
-       {GBE_STATSA_INFO(rx_oversized_frames)},
-       {GBE_STATSA_INFO(rx_jabber_frames)},
-       {GBE_STATSA_INFO(rx_undersized_frames)},
-       {GBE_STATSA_INFO(rx_fragments)},
-       {GBE_STATSA_INFO(rx_bytes)},
-       {GBE_STATSA_INFO(tx_good_frames)},
-       {GBE_STATSA_INFO(tx_broadcast_frames)},
-       {GBE_STATSA_INFO(tx_multicast_frames)},
-       {GBE_STATSA_INFO(tx_pause_frames)},
-       {GBE_STATSA_INFO(tx_deferred_frames)},
-       {GBE_STATSA_INFO(tx_collision_frames)},
-       {GBE_STATSA_INFO(tx_single_coll_frames)},
-       {GBE_STATSA_INFO(tx_mult_coll_frames)},
-       {GBE_STATSA_INFO(tx_excessive_collisions)},
-       {GBE_STATSA_INFO(tx_late_collisions)},
-       {GBE_STATSA_INFO(tx_underrun)},
-       {GBE_STATSA_INFO(tx_carrier_sense_errors)},
-       {GBE_STATSA_INFO(tx_bytes)},
-       {GBE_STATSA_INFO(tx_64byte_frames)},
-       {GBE_STATSA_INFO(tx_65_to_127byte_frames)},
-       {GBE_STATSA_INFO(tx_128_to_255byte_frames)},
-       {GBE_STATSA_INFO(tx_256_to_511byte_frames)},
-       {GBE_STATSA_INFO(tx_512_to_1023byte_frames)},
-       {GBE_STATSA_INFO(tx_1024byte_frames)},
-       {GBE_STATSA_INFO(net_bytes)},
-       {GBE_STATSA_INFO(rx_sof_overruns)},
-       {GBE_STATSA_INFO(rx_mof_overruns)},
-       {GBE_STATSA_INFO(rx_dma_overruns)},
+       GBE_STATSA_INFO(rx_good_frames),
+       GBE_STATSA_INFO(rx_broadcast_frames),
+       GBE_STATSA_INFO(rx_multicast_frames),
+       GBE_STATSA_INFO(rx_pause_frames),
+       GBE_STATSA_INFO(rx_crc_errors),
+       GBE_STATSA_INFO(rx_align_code_errors),
+       GBE_STATSA_INFO(rx_oversized_frames),
+       GBE_STATSA_INFO(rx_jabber_frames),
+       GBE_STATSA_INFO(rx_undersized_frames),
+       GBE_STATSA_INFO(rx_fragments),
+       GBE_STATSA_INFO(rx_bytes),
+       GBE_STATSA_INFO(tx_good_frames),
+       GBE_STATSA_INFO(tx_broadcast_frames),
+       GBE_STATSA_INFO(tx_multicast_frames),
+       GBE_STATSA_INFO(tx_pause_frames),
+       GBE_STATSA_INFO(tx_deferred_frames),
+       GBE_STATSA_INFO(tx_collision_frames),
+       GBE_STATSA_INFO(tx_single_coll_frames),
+       GBE_STATSA_INFO(tx_mult_coll_frames),
+       GBE_STATSA_INFO(tx_excessive_collisions),
+       GBE_STATSA_INFO(tx_late_collisions),
+       GBE_STATSA_INFO(tx_underrun),
+       GBE_STATSA_INFO(tx_carrier_sense_errors),
+       GBE_STATSA_INFO(tx_bytes),
+       GBE_STATSA_INFO(tx_64byte_frames),
+       GBE_STATSA_INFO(tx_65_to_127byte_frames),
+       GBE_STATSA_INFO(tx_128_to_255byte_frames),
+       GBE_STATSA_INFO(tx_256_to_511byte_frames),
+       GBE_STATSA_INFO(tx_512_to_1023byte_frames),
+       GBE_STATSA_INFO(tx_1024byte_frames),
+       GBE_STATSA_INFO(net_bytes),
+       GBE_STATSA_INFO(rx_sof_overruns),
+       GBE_STATSA_INFO(rx_mof_overruns),
+       GBE_STATSA_INFO(rx_dma_overruns),
        /* GBE module B */
-       {GBE_STATSB_INFO(rx_good_frames)},
-       {GBE_STATSB_INFO(rx_broadcast_frames)},
-       {GBE_STATSB_INFO(rx_multicast_frames)},
-       {GBE_STATSB_INFO(rx_pause_frames)},
-       {GBE_STATSB_INFO(rx_crc_errors)},
-       {GBE_STATSB_INFO(rx_align_code_errors)},
-       {GBE_STATSB_INFO(rx_oversized_frames)},
-       {GBE_STATSB_INFO(rx_jabber_frames)},
-       {GBE_STATSB_INFO(rx_undersized_frames)},
-       {GBE_STATSB_INFO(rx_fragments)},
-       {GBE_STATSB_INFO(rx_bytes)},
-       {GBE_STATSB_INFO(tx_good_frames)},
-       {GBE_STATSB_INFO(tx_broadcast_frames)},
-       {GBE_STATSB_INFO(tx_multicast_frames)},
-       {GBE_STATSB_INFO(tx_pause_frames)},
-       {GBE_STATSB_INFO(tx_deferred_frames)},
-       {GBE_STATSB_INFO(tx_collision_frames)},
-       {GBE_STATSB_INFO(tx_single_coll_frames)},
-       {GBE_STATSB_INFO(tx_mult_coll_frames)},
-       {GBE_STATSB_INFO(tx_excessive_collisions)},
-       {GBE_STATSB_INFO(tx_late_collisions)},
-       {GBE_STATSB_INFO(tx_underrun)},
-       {GBE_STATSB_INFO(tx_carrier_sense_errors)},
-       {GBE_STATSB_INFO(tx_bytes)},
-       {GBE_STATSB_INFO(tx_64byte_frames)},
-       {GBE_STATSB_INFO(tx_65_to_127byte_frames)},
-       {GBE_STATSB_INFO(tx_128_to_255byte_frames)},
-       {GBE_STATSB_INFO(tx_256_to_511byte_frames)},
-       {GBE_STATSB_INFO(tx_512_to_1023byte_frames)},
-       {GBE_STATSB_INFO(tx_1024byte_frames)},
-       {GBE_STATSB_INFO(net_bytes)},
-       {GBE_STATSB_INFO(rx_sof_overruns)},
-       {GBE_STATSB_INFO(rx_mof_overruns)},
-       {GBE_STATSB_INFO(rx_dma_overruns)},
+       GBE_STATSB_INFO(rx_good_frames),
+       GBE_STATSB_INFO(rx_broadcast_frames),
+       GBE_STATSB_INFO(rx_multicast_frames),
+       GBE_STATSB_INFO(rx_pause_frames),
+       GBE_STATSB_INFO(rx_crc_errors),
+       GBE_STATSB_INFO(rx_align_code_errors),
+       GBE_STATSB_INFO(rx_oversized_frames),
+       GBE_STATSB_INFO(rx_jabber_frames),
+       GBE_STATSB_INFO(rx_undersized_frames),
+       GBE_STATSB_INFO(rx_fragments),
+       GBE_STATSB_INFO(rx_bytes),
+       GBE_STATSB_INFO(tx_good_frames),
+       GBE_STATSB_INFO(tx_broadcast_frames),
+       GBE_STATSB_INFO(tx_multicast_frames),
+       GBE_STATSB_INFO(tx_pause_frames),
+       GBE_STATSB_INFO(tx_deferred_frames),
+       GBE_STATSB_INFO(tx_collision_frames),
+       GBE_STATSB_INFO(tx_single_coll_frames),
+       GBE_STATSB_INFO(tx_mult_coll_frames),
+       GBE_STATSB_INFO(tx_excessive_collisions),
+       GBE_STATSB_INFO(tx_late_collisions),
+       GBE_STATSB_INFO(tx_underrun),
+       GBE_STATSB_INFO(tx_carrier_sense_errors),
+       GBE_STATSB_INFO(tx_bytes),
+       GBE_STATSB_INFO(tx_64byte_frames),
+       GBE_STATSB_INFO(tx_65_to_127byte_frames),
+       GBE_STATSB_INFO(tx_128_to_255byte_frames),
+       GBE_STATSB_INFO(tx_256_to_511byte_frames),
+       GBE_STATSB_INFO(tx_512_to_1023byte_frames),
+       GBE_STATSB_INFO(tx_1024byte_frames),
+       GBE_STATSB_INFO(net_bytes),
+       GBE_STATSB_INFO(rx_sof_overruns),
+       GBE_STATSB_INFO(rx_mof_overruns),
+       GBE_STATSB_INFO(rx_dma_overruns),
        /* GBE module C */
-       {GBE_STATSC_INFO(rx_good_frames)},
-       {GBE_STATSC_INFO(rx_broadcast_frames)},
-       {GBE_STATSC_INFO(rx_multicast_frames)},
-       {GBE_STATSC_INFO(rx_pause_frames)},
-       {GBE_STATSC_INFO(rx_crc_errors)},
-       {GBE_STATSC_INFO(rx_align_code_errors)},
-       {GBE_STATSC_INFO(rx_oversized_frames)},
-       {GBE_STATSC_INFO(rx_jabber_frames)},
-       {GBE_STATSC_INFO(rx_undersized_frames)},
-       {GBE_STATSC_INFO(rx_fragments)},
-       {GBE_STATSC_INFO(rx_bytes)},
-       {GBE_STATSC_INFO(tx_good_frames)},
-       {GBE_STATSC_INFO(tx_broadcast_frames)},
-       {GBE_STATSC_INFO(tx_multicast_frames)},
-       {GBE_STATSC_INFO(tx_pause_frames)},
-       {GBE_STATSC_INFO(tx_deferred_frames)},
-       {GBE_STATSC_INFO(tx_collision_frames)},
-       {GBE_STATSC_INFO(tx_single_coll_frames)},
-       {GBE_STATSC_INFO(tx_mult_coll_frames)},
-       {GBE_STATSC_INFO(tx_excessive_collisions)},
-       {GBE_STATSC_INFO(tx_late_collisions)},
-       {GBE_STATSC_INFO(tx_underrun)},
-       {GBE_STATSC_INFO(tx_carrier_sense_errors)},
-       {GBE_STATSC_INFO(tx_bytes)},
-       {GBE_STATSC_INFO(tx_64byte_frames)},
-       {GBE_STATSC_INFO(tx_65_to_127byte_frames)},
-       {GBE_STATSC_INFO(tx_128_to_255byte_frames)},
-       {GBE_STATSC_INFO(tx_256_to_511byte_frames)},
-       {GBE_STATSC_INFO(tx_512_to_1023byte_frames)},
-       {GBE_STATSC_INFO(tx_1024byte_frames)},
-       {GBE_STATSC_INFO(net_bytes)},
-       {GBE_STATSC_INFO(rx_sof_overruns)},
-       {GBE_STATSC_INFO(rx_mof_overruns)},
-       {GBE_STATSC_INFO(rx_dma_overruns)},
+       GBE_STATSC_INFO(rx_good_frames),
+       GBE_STATSC_INFO(rx_broadcast_frames),
+       GBE_STATSC_INFO(rx_multicast_frames),
+       GBE_STATSC_INFO(rx_pause_frames),
+       GBE_STATSC_INFO(rx_crc_errors),
+       GBE_STATSC_INFO(rx_align_code_errors),
+       GBE_STATSC_INFO(rx_oversized_frames),
+       GBE_STATSC_INFO(rx_jabber_frames),
+       GBE_STATSC_INFO(rx_undersized_frames),
+       GBE_STATSC_INFO(rx_fragments),
+       GBE_STATSC_INFO(rx_bytes),
+       GBE_STATSC_INFO(tx_good_frames),
+       GBE_STATSC_INFO(tx_broadcast_frames),
+       GBE_STATSC_INFO(tx_multicast_frames),
+       GBE_STATSC_INFO(tx_pause_frames),
+       GBE_STATSC_INFO(tx_deferred_frames),
+       GBE_STATSC_INFO(tx_collision_frames),
+       GBE_STATSC_INFO(tx_single_coll_frames),
+       GBE_STATSC_INFO(tx_mult_coll_frames),
+       GBE_STATSC_INFO(tx_excessive_collisions),
+       GBE_STATSC_INFO(tx_late_collisions),
+       GBE_STATSC_INFO(tx_underrun),
+       GBE_STATSC_INFO(tx_carrier_sense_errors),
+       GBE_STATSC_INFO(tx_bytes),
+       GBE_STATSC_INFO(tx_64byte_frames),
+       GBE_STATSC_INFO(tx_65_to_127byte_frames),
+       GBE_STATSC_INFO(tx_128_to_255byte_frames),
+       GBE_STATSC_INFO(tx_256_to_511byte_frames),
+       GBE_STATSC_INFO(tx_512_to_1023byte_frames),
+       GBE_STATSC_INFO(tx_1024byte_frames),
+       GBE_STATSC_INFO(net_bytes),
+       GBE_STATSC_INFO(rx_sof_overruns),
+       GBE_STATSC_INFO(rx_mof_overruns),
+       GBE_STATSC_INFO(rx_dma_overruns),
        /* GBE module D */
-       {GBE_STATSD_INFO(rx_good_frames)},
-       {GBE_STATSD_INFO(rx_broadcast_frames)},
-       {GBE_STATSD_INFO(rx_multicast_frames)},
-       {GBE_STATSD_INFO(rx_pause_frames)},
-       {GBE_STATSD_INFO(rx_crc_errors)},
-       {GBE_STATSD_INFO(rx_align_code_errors)},
-       {GBE_STATSD_INFO(rx_oversized_frames)},
-       {GBE_STATSD_INFO(rx_jabber_frames)},
-       {GBE_STATSD_INFO(rx_undersized_frames)},
-       {GBE_STATSD_INFO(rx_fragments)},
-       {GBE_STATSD_INFO(rx_bytes)},
-       {GBE_STATSD_INFO(tx_good_frames)},
-       {GBE_STATSD_INFO(tx_broadcast_frames)},
-       {GBE_STATSD_INFO(tx_multicast_frames)},
-       {GBE_STATSD_INFO(tx_pause_frames)},
-       {GBE_STATSD_INFO(tx_deferred_frames)},
-       {GBE_STATSD_INFO(tx_collision_frames)},
-       {GBE_STATSD_INFO(tx_single_coll_frames)},
-       {GBE_STATSD_INFO(tx_mult_coll_frames)},
-       {GBE_STATSD_INFO(tx_excessive_collisions)},
-       {GBE_STATSD_INFO(tx_late_collisions)},
-       {GBE_STATSD_INFO(tx_underrun)},
-       {GBE_STATSD_INFO(tx_carrier_sense_errors)},
-       {GBE_STATSD_INFO(tx_bytes)},
-       {GBE_STATSD_INFO(tx_64byte_frames)},
-       {GBE_STATSD_INFO(tx_65_to_127byte_frames)},
-       {GBE_STATSD_INFO(tx_128_to_255byte_frames)},
-       {GBE_STATSD_INFO(tx_256_to_511byte_frames)},
-       {GBE_STATSD_INFO(tx_512_to_1023byte_frames)},
-       {GBE_STATSD_INFO(tx_1024byte_frames)},
-       {GBE_STATSD_INFO(net_bytes)},
-       {GBE_STATSD_INFO(rx_sof_overruns)},
-       {GBE_STATSD_INFO(rx_mof_overruns)},
-       {GBE_STATSD_INFO(rx_dma_overruns)},
+       GBE_STATSD_INFO(rx_good_frames),
+       GBE_STATSD_INFO(rx_broadcast_frames),
+       GBE_STATSD_INFO(rx_multicast_frames),
+       GBE_STATSD_INFO(rx_pause_frames),
+       GBE_STATSD_INFO(rx_crc_errors),
+       GBE_STATSD_INFO(rx_align_code_errors),
+       GBE_STATSD_INFO(rx_oversized_frames),
+       GBE_STATSD_INFO(rx_jabber_frames),
+       GBE_STATSD_INFO(rx_undersized_frames),
+       GBE_STATSD_INFO(rx_fragments),
+       GBE_STATSD_INFO(rx_bytes),
+       GBE_STATSD_INFO(tx_good_frames),
+       GBE_STATSD_INFO(tx_broadcast_frames),
+       GBE_STATSD_INFO(tx_multicast_frames),
+       GBE_STATSD_INFO(tx_pause_frames),
+       GBE_STATSD_INFO(tx_deferred_frames),
+       GBE_STATSD_INFO(tx_collision_frames),
+       GBE_STATSD_INFO(tx_single_coll_frames),
+       GBE_STATSD_INFO(tx_mult_coll_frames),
+       GBE_STATSD_INFO(tx_excessive_collisions),
+       GBE_STATSD_INFO(tx_late_collisions),
+       GBE_STATSD_INFO(tx_underrun),
+       GBE_STATSD_INFO(tx_carrier_sense_errors),
+       GBE_STATSD_INFO(tx_bytes),
+       GBE_STATSD_INFO(tx_64byte_frames),
+       GBE_STATSD_INFO(tx_65_to_127byte_frames),
+       GBE_STATSD_INFO(tx_128_to_255byte_frames),
+       GBE_STATSD_INFO(tx_256_to_511byte_frames),
+       GBE_STATSD_INFO(tx_512_to_1023byte_frames),
+       GBE_STATSD_INFO(tx_1024byte_frames),
+       GBE_STATSD_INFO(net_bytes),
+       GBE_STATSD_INFO(rx_sof_overruns),
+       GBE_STATSD_INFO(rx_mof_overruns),
+       GBE_STATSD_INFO(rx_dma_overruns),
 };
 
-#define XGBE_STATS0_INFO(field)        "GBE_0:"#field, XGBE_STATS0_MODULE, \
-                               FIELD_SIZEOF(struct xgbe_hw_stats, field), \
-                               offsetof(struct xgbe_hw_stats, field)
+/* This is the size of entries in GBENU_STATS_HOST */
+#define GBENU_ET_STATS_HOST_SIZE       33
+
+#define GBENU_STATS_HOST(field)                                        \
+{                                                              \
+       "GBE_HOST:"#field, GBENU_STATS0_MODULE,                 \
+       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       offsetof(struct gbenu_hw_stats, field)                  \
+}
 
-#define XGBE_STATS1_INFO(field)        "GBE_1:"#field, XGBE_STATS1_MODULE, \
-                               FIELD_SIZEOF(struct xgbe_hw_stats, field), \
-                               offsetof(struct xgbe_hw_stats, field)
+/* This is the size of entries in each per-port GBENU_STATS_Pn group */
+#define GBENU_ET_STATS_PORT_SIZE       46
 
-#define XGBE_STATS2_INFO(field)        "GBE_2:"#field, XGBE_STATS2_MODULE, \
-                               FIELD_SIZEOF(struct xgbe_hw_stats, field), \
-                               offsetof(struct xgbe_hw_stats, field)
+#define GBENU_STATS_P1(field)                                  \
+{                                                              \
+       "GBE_P1:"#field, GBENU_STATS1_MODULE,                   \
+       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       offsetof(struct gbenu_hw_stats, field)                  \
+}
+
+#define GBENU_STATS_P2(field)                                  \
+{                                                              \
+       "GBE_P2:"#field, GBENU_STATS2_MODULE,                   \
+       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       offsetof(struct gbenu_hw_stats, field)                  \
+}
+
+#define GBENU_STATS_P3(field)                                  \
+{                                                              \
+       "GBE_P3:"#field, GBENU_STATS3_MODULE,                   \
+       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       offsetof(struct gbenu_hw_stats, field)                  \
+}
+
+#define GBENU_STATS_P4(field)                                  \
+{                                                              \
+       "GBE_P4:"#field, GBENU_STATS4_MODULE,                   \
+       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       offsetof(struct gbenu_hw_stats, field)                  \
+}
+
+#define GBENU_STATS_P5(field)                                  \
+{                                                              \
+       "GBE_P5:"#field, GBENU_STATS5_MODULE,                   \
+       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       offsetof(struct gbenu_hw_stats, field)                  \
+}
+
+#define GBENU_STATS_P6(field)                                  \
+{                                                              \
+       "GBE_P6:"#field, GBENU_STATS6_MODULE,                   \
+       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       offsetof(struct gbenu_hw_stats, field)                  \
+}
+
+#define GBENU_STATS_P7(field)                                  \
+{                                                              \
+       "GBE_P7:"#field, GBENU_STATS7_MODULE,                   \
+       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       offsetof(struct gbenu_hw_stats, field)                  \
+}
+
+#define GBENU_STATS_P8(field)                                  \
+{                                                              \
+       "GBE_P8:"#field, GBENU_STATS8_MODULE,                   \
+       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       offsetof(struct gbenu_hw_stats, field)                  \
+}
+
+static const struct netcp_ethtool_stat gbenu_et_stats[] = {
+       /* GBENU Host Module */
+       GBENU_STATS_HOST(rx_good_frames),
+       GBENU_STATS_HOST(rx_broadcast_frames),
+       GBENU_STATS_HOST(rx_multicast_frames),
+       GBENU_STATS_HOST(rx_crc_errors),
+       GBENU_STATS_HOST(rx_oversized_frames),
+       GBENU_STATS_HOST(rx_undersized_frames),
+       GBENU_STATS_HOST(ale_drop),
+       GBENU_STATS_HOST(ale_overrun_drop),
+       GBENU_STATS_HOST(rx_bytes),
+       GBENU_STATS_HOST(tx_good_frames),
+       GBENU_STATS_HOST(tx_broadcast_frames),
+       GBENU_STATS_HOST(tx_multicast_frames),
+       GBENU_STATS_HOST(tx_bytes),
+       GBENU_STATS_HOST(tx_64B_frames),
+       GBENU_STATS_HOST(tx_65_to_127B_frames),
+       GBENU_STATS_HOST(tx_128_to_255B_frames),
+       GBENU_STATS_HOST(tx_256_to_511B_frames),
+       GBENU_STATS_HOST(tx_512_to_1023B_frames),
+       GBENU_STATS_HOST(tx_1024B_frames),
+       GBENU_STATS_HOST(net_bytes),
+       GBENU_STATS_HOST(rx_bottom_fifo_drop),
+       GBENU_STATS_HOST(rx_port_mask_drop),
+       GBENU_STATS_HOST(rx_top_fifo_drop),
+       GBENU_STATS_HOST(ale_rate_limit_drop),
+       GBENU_STATS_HOST(ale_vid_ingress_drop),
+       GBENU_STATS_HOST(ale_da_eq_sa_drop),
+       GBENU_STATS_HOST(ale_unknown_ucast),
+       GBENU_STATS_HOST(ale_unknown_ucast_bytes),
+       GBENU_STATS_HOST(ale_unknown_mcast),
+       GBENU_STATS_HOST(ale_unknown_mcast_bytes),
+       GBENU_STATS_HOST(ale_unknown_bcast),
+       GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+       GBENU_STATS_HOST(tx_mem_protect_err),
+       /* GBENU Module 1 */
+       GBENU_STATS_P1(rx_good_frames),
+       GBENU_STATS_P1(rx_broadcast_frames),
+       GBENU_STATS_P1(rx_multicast_frames),
+       GBENU_STATS_P1(rx_pause_frames),
+       GBENU_STATS_P1(rx_crc_errors),
+       GBENU_STATS_P1(rx_align_code_errors),
+       GBENU_STATS_P1(rx_oversized_frames),
+       GBENU_STATS_P1(rx_jabber_frames),
+       GBENU_STATS_P1(rx_undersized_frames),
+       GBENU_STATS_P1(rx_fragments),
+       GBENU_STATS_P1(ale_drop),
+       GBENU_STATS_P1(ale_overrun_drop),
+       GBENU_STATS_P1(rx_bytes),
+       GBENU_STATS_P1(tx_good_frames),
+       GBENU_STATS_P1(tx_broadcast_frames),
+       GBENU_STATS_P1(tx_multicast_frames),
+       GBENU_STATS_P1(tx_pause_frames),
+       GBENU_STATS_P1(tx_deferred_frames),
+       GBENU_STATS_P1(tx_collision_frames),
+       GBENU_STATS_P1(tx_single_coll_frames),
+       GBENU_STATS_P1(tx_mult_coll_frames),
+       GBENU_STATS_P1(tx_excessive_collisions),
+       GBENU_STATS_P1(tx_late_collisions),
+       GBENU_STATS_P1(rx_ipg_error),
+       GBENU_STATS_P1(tx_carrier_sense_errors),
+       GBENU_STATS_P1(tx_bytes),
+       GBENU_STATS_P1(tx_64B_frames),
+       GBENU_STATS_P1(tx_65_to_127B_frames),
+       GBENU_STATS_P1(tx_128_to_255B_frames),
+       GBENU_STATS_P1(tx_256_to_511B_frames),
+       GBENU_STATS_P1(tx_512_to_1023B_frames),
+       GBENU_STATS_P1(tx_1024B_frames),
+       GBENU_STATS_P1(net_bytes),
+       GBENU_STATS_P1(rx_bottom_fifo_drop),
+       GBENU_STATS_P1(rx_port_mask_drop),
+       GBENU_STATS_P1(rx_top_fifo_drop),
+       GBENU_STATS_P1(ale_rate_limit_drop),
+       GBENU_STATS_P1(ale_vid_ingress_drop),
+       GBENU_STATS_P1(ale_da_eq_sa_drop),
+       GBENU_STATS_P1(ale_unknown_ucast),
+       GBENU_STATS_P1(ale_unknown_ucast_bytes),
+       GBENU_STATS_P1(ale_unknown_mcast),
+       GBENU_STATS_P1(ale_unknown_mcast_bytes),
+       GBENU_STATS_P1(ale_unknown_bcast),
+       GBENU_STATS_P1(ale_unknown_bcast_bytes),
+       GBENU_STATS_P1(tx_mem_protect_err),
+       /* GBENU Module 2 */
+       GBENU_STATS_P2(rx_good_frames),
+       GBENU_STATS_P2(rx_broadcast_frames),
+       GBENU_STATS_P2(rx_multicast_frames),
+       GBENU_STATS_P2(rx_pause_frames),
+       GBENU_STATS_P2(rx_crc_errors),
+       GBENU_STATS_P2(rx_align_code_errors),
+       GBENU_STATS_P2(rx_oversized_frames),
+       GBENU_STATS_P2(rx_jabber_frames),
+       GBENU_STATS_P2(rx_undersized_frames),
+       GBENU_STATS_P2(rx_fragments),
+       GBENU_STATS_P2(ale_drop),
+       GBENU_STATS_P2(ale_overrun_drop),
+       GBENU_STATS_P2(rx_bytes),
+       GBENU_STATS_P2(tx_good_frames),
+       GBENU_STATS_P2(tx_broadcast_frames),
+       GBENU_STATS_P2(tx_multicast_frames),
+       GBENU_STATS_P2(tx_pause_frames),
+       GBENU_STATS_P2(tx_deferred_frames),
+       GBENU_STATS_P2(tx_collision_frames),
+       GBENU_STATS_P2(tx_single_coll_frames),
+       GBENU_STATS_P2(tx_mult_coll_frames),
+       GBENU_STATS_P2(tx_excessive_collisions),
+       GBENU_STATS_P2(tx_late_collisions),
+       GBENU_STATS_P2(rx_ipg_error),
+       GBENU_STATS_P2(tx_carrier_sense_errors),
+       GBENU_STATS_P2(tx_bytes),
+       GBENU_STATS_P2(tx_64B_frames),
+       GBENU_STATS_P2(tx_65_to_127B_frames),
+       GBENU_STATS_P2(tx_128_to_255B_frames),
+       GBENU_STATS_P2(tx_256_to_511B_frames),
+       GBENU_STATS_P2(tx_512_to_1023B_frames),
+       GBENU_STATS_P2(tx_1024B_frames),
+       GBENU_STATS_P2(net_bytes),
+       GBENU_STATS_P2(rx_bottom_fifo_drop),
+       GBENU_STATS_P2(rx_port_mask_drop),
+       GBENU_STATS_P2(rx_top_fifo_drop),
+       GBENU_STATS_P2(ale_rate_limit_drop),
+       GBENU_STATS_P2(ale_vid_ingress_drop),
+       GBENU_STATS_P2(ale_da_eq_sa_drop),
+       GBENU_STATS_P2(ale_unknown_ucast),
+       GBENU_STATS_P2(ale_unknown_ucast_bytes),
+       GBENU_STATS_P2(ale_unknown_mcast),
+       GBENU_STATS_P2(ale_unknown_mcast_bytes),
+       GBENU_STATS_P2(ale_unknown_bcast),
+       GBENU_STATS_P2(ale_unknown_bcast_bytes),
+       GBENU_STATS_P2(tx_mem_protect_err),
+       /* GBENU Module 3 */
+       GBENU_STATS_P3(rx_good_frames),
+       GBENU_STATS_P3(rx_broadcast_frames),
+       GBENU_STATS_P3(rx_multicast_frames),
+       GBENU_STATS_P3(rx_pause_frames),
+       GBENU_STATS_P3(rx_crc_errors),
+       GBENU_STATS_P3(rx_align_code_errors),
+       GBENU_STATS_P3(rx_oversized_frames),
+       GBENU_STATS_P3(rx_jabber_frames),
+       GBENU_STATS_P3(rx_undersized_frames),
+       GBENU_STATS_P3(rx_fragments),
+       GBENU_STATS_P3(ale_drop),
+       GBENU_STATS_P3(ale_overrun_drop),
+       GBENU_STATS_P3(rx_bytes),
+       GBENU_STATS_P3(tx_good_frames),
+       GBENU_STATS_P3(tx_broadcast_frames),
+       GBENU_STATS_P3(tx_multicast_frames),
+       GBENU_STATS_P3(tx_pause_frames),
+       GBENU_STATS_P3(tx_deferred_frames),
+       GBENU_STATS_P3(tx_collision_frames),
+       GBENU_STATS_P3(tx_single_coll_frames),
+       GBENU_STATS_P3(tx_mult_coll_frames),
+       GBENU_STATS_P3(tx_excessive_collisions),
+       GBENU_STATS_P3(tx_late_collisions),
+       GBENU_STATS_P3(rx_ipg_error),
+       GBENU_STATS_P3(tx_carrier_sense_errors),
+       GBENU_STATS_P3(tx_bytes),
+       GBENU_STATS_P3(tx_64B_frames),
+       GBENU_STATS_P3(tx_65_to_127B_frames),
+       GBENU_STATS_P3(tx_128_to_255B_frames),
+       GBENU_STATS_P3(tx_256_to_511B_frames),
+       GBENU_STATS_P3(tx_512_to_1023B_frames),
+       GBENU_STATS_P3(tx_1024B_frames),
+       GBENU_STATS_P3(net_bytes),
+       GBENU_STATS_P3(rx_bottom_fifo_drop),
+       GBENU_STATS_P3(rx_port_mask_drop),
+       GBENU_STATS_P3(rx_top_fifo_drop),
+       GBENU_STATS_P3(ale_rate_limit_drop),
+       GBENU_STATS_P3(ale_vid_ingress_drop),
+       GBENU_STATS_P3(ale_da_eq_sa_drop),
+       GBENU_STATS_P3(ale_unknown_ucast),
+       GBENU_STATS_P3(ale_unknown_ucast_bytes),
+       GBENU_STATS_P3(ale_unknown_mcast),
+       GBENU_STATS_P3(ale_unknown_mcast_bytes),
+       GBENU_STATS_P3(ale_unknown_bcast),
+       GBENU_STATS_P3(ale_unknown_bcast_bytes),
+       GBENU_STATS_P3(tx_mem_protect_err),
+       /* GBENU Module 4 */
+       GBENU_STATS_P4(rx_good_frames),
+       GBENU_STATS_P4(rx_broadcast_frames),
+       GBENU_STATS_P4(rx_multicast_frames),
+       GBENU_STATS_P4(rx_pause_frames),
+       GBENU_STATS_P4(rx_crc_errors),
+       GBENU_STATS_P4(rx_align_code_errors),
+       GBENU_STATS_P4(rx_oversized_frames),
+       GBENU_STATS_P4(rx_jabber_frames),
+       GBENU_STATS_P4(rx_undersized_frames),
+       GBENU_STATS_P4(rx_fragments),
+       GBENU_STATS_P4(ale_drop),
+       GBENU_STATS_P4(ale_overrun_drop),
+       GBENU_STATS_P4(rx_bytes),
+       GBENU_STATS_P4(tx_good_frames),
+       GBENU_STATS_P4(tx_broadcast_frames),
+       GBENU_STATS_P4(tx_multicast_frames),
+       GBENU_STATS_P4(tx_pause_frames),
+       GBENU_STATS_P4(tx_deferred_frames),
+       GBENU_STATS_P4(tx_collision_frames),
+       GBENU_STATS_P4(tx_single_coll_frames),
+       GBENU_STATS_P4(tx_mult_coll_frames),
+       GBENU_STATS_P4(tx_excessive_collisions),
+       GBENU_STATS_P4(tx_late_collisions),
+       GBENU_STATS_P4(rx_ipg_error),
+       GBENU_STATS_P4(tx_carrier_sense_errors),
+       GBENU_STATS_P4(tx_bytes),
+       GBENU_STATS_P4(tx_64B_frames),
+       GBENU_STATS_P4(tx_65_to_127B_frames),
+       GBENU_STATS_P4(tx_128_to_255B_frames),
+       GBENU_STATS_P4(tx_256_to_511B_frames),
+       GBENU_STATS_P4(tx_512_to_1023B_frames),
+       GBENU_STATS_P4(tx_1024B_frames),
+       GBENU_STATS_P4(net_bytes),
+       GBENU_STATS_P4(rx_bottom_fifo_drop),
+       GBENU_STATS_P4(rx_port_mask_drop),
+       GBENU_STATS_P4(rx_top_fifo_drop),
+       GBENU_STATS_P4(ale_rate_limit_drop),
+       GBENU_STATS_P4(ale_vid_ingress_drop),
+       GBENU_STATS_P4(ale_da_eq_sa_drop),
+       GBENU_STATS_P4(ale_unknown_ucast),
+       GBENU_STATS_P4(ale_unknown_ucast_bytes),
+       GBENU_STATS_P4(ale_unknown_mcast),
+       GBENU_STATS_P4(ale_unknown_mcast_bytes),
+       GBENU_STATS_P4(ale_unknown_bcast),
+       GBENU_STATS_P4(ale_unknown_bcast_bytes),
+       GBENU_STATS_P4(tx_mem_protect_err),
+       /* GBENU Module 5 */
+       GBENU_STATS_P5(rx_good_frames),
+       GBENU_STATS_P5(rx_broadcast_frames),
+       GBENU_STATS_P5(rx_multicast_frames),
+       GBENU_STATS_P5(rx_pause_frames),
+       GBENU_STATS_P5(rx_crc_errors),
+       GBENU_STATS_P5(rx_align_code_errors),
+       GBENU_STATS_P5(rx_oversized_frames),
+       GBENU_STATS_P5(rx_jabber_frames),
+       GBENU_STATS_P5(rx_undersized_frames),
+       GBENU_STATS_P5(rx_fragments),
+       GBENU_STATS_P5(ale_drop),
+       GBENU_STATS_P5(ale_overrun_drop),
+       GBENU_STATS_P5(rx_bytes),
+       GBENU_STATS_P5(tx_good_frames),
+       GBENU_STATS_P5(tx_broadcast_frames),
+       GBENU_STATS_P5(tx_multicast_frames),
+       GBENU_STATS_P5(tx_pause_frames),
+       GBENU_STATS_P5(tx_deferred_frames),
+       GBENU_STATS_P5(tx_collision_frames),
+       GBENU_STATS_P5(tx_single_coll_frames),
+       GBENU_STATS_P5(tx_mult_coll_frames),
+       GBENU_STATS_P5(tx_excessive_collisions),
+       GBENU_STATS_P5(tx_late_collisions),
+       GBENU_STATS_P5(rx_ipg_error),
+       GBENU_STATS_P5(tx_carrier_sense_errors),
+       GBENU_STATS_P5(tx_bytes),
+       GBENU_STATS_P5(tx_64B_frames),
+       GBENU_STATS_P5(tx_65_to_127B_frames),
+       GBENU_STATS_P5(tx_128_to_255B_frames),
+       GBENU_STATS_P5(tx_256_to_511B_frames),
+       GBENU_STATS_P5(tx_512_to_1023B_frames),
+       GBENU_STATS_P5(tx_1024B_frames),
+       GBENU_STATS_P5(net_bytes),
+       GBENU_STATS_P5(rx_bottom_fifo_drop),
+       GBENU_STATS_P5(rx_port_mask_drop),
+       GBENU_STATS_P5(rx_top_fifo_drop),
+       GBENU_STATS_P5(ale_rate_limit_drop),
+       GBENU_STATS_P5(ale_vid_ingress_drop),
+       GBENU_STATS_P5(ale_da_eq_sa_drop),
+       GBENU_STATS_P5(ale_unknown_ucast),
+       GBENU_STATS_P5(ale_unknown_ucast_bytes),
+       GBENU_STATS_P5(ale_unknown_mcast),
+       GBENU_STATS_P5(ale_unknown_mcast_bytes),
+       GBENU_STATS_P5(ale_unknown_bcast),
+       GBENU_STATS_P5(ale_unknown_bcast_bytes),
+       GBENU_STATS_P5(tx_mem_protect_err),
+       /* GBENU Module 6 */
+       GBENU_STATS_P6(rx_good_frames),
+       GBENU_STATS_P6(rx_broadcast_frames),
+       GBENU_STATS_P6(rx_multicast_frames),
+       GBENU_STATS_P6(rx_pause_frames),
+       GBENU_STATS_P6(rx_crc_errors),
+       GBENU_STATS_P6(rx_align_code_errors),
+       GBENU_STATS_P6(rx_oversized_frames),
+       GBENU_STATS_P6(rx_jabber_frames),
+       GBENU_STATS_P6(rx_undersized_frames),
+       GBENU_STATS_P6(rx_fragments),
+       GBENU_STATS_P6(ale_drop),
+       GBENU_STATS_P6(ale_overrun_drop),
+       GBENU_STATS_P6(rx_bytes),
+       GBENU_STATS_P6(tx_good_frames),
+       GBENU_STATS_P6(tx_broadcast_frames),
+       GBENU_STATS_P6(tx_multicast_frames),
+       GBENU_STATS_P6(tx_pause_frames),
+       GBENU_STATS_P6(tx_deferred_frames),
+       GBENU_STATS_P6(tx_collision_frames),
+       GBENU_STATS_P6(tx_single_coll_frames),
+       GBENU_STATS_P6(tx_mult_coll_frames),
+       GBENU_STATS_P6(tx_excessive_collisions),
+       GBENU_STATS_P6(tx_late_collisions),
+       GBENU_STATS_P6(rx_ipg_error),
+       GBENU_STATS_P6(tx_carrier_sense_errors),
+       GBENU_STATS_P6(tx_bytes),
+       GBENU_STATS_P6(tx_64B_frames),
+       GBENU_STATS_P6(tx_65_to_127B_frames),
+       GBENU_STATS_P6(tx_128_to_255B_frames),
+       GBENU_STATS_P6(tx_256_to_511B_frames),
+       GBENU_STATS_P6(tx_512_to_1023B_frames),
+       GBENU_STATS_P6(tx_1024B_frames),
+       GBENU_STATS_P6(net_bytes),
+       GBENU_STATS_P6(rx_bottom_fifo_drop),
+       GBENU_STATS_P6(rx_port_mask_drop),
+       GBENU_STATS_P6(rx_top_fifo_drop),
+       GBENU_STATS_P6(ale_rate_limit_drop),
+       GBENU_STATS_P6(ale_vid_ingress_drop),
+       GBENU_STATS_P6(ale_da_eq_sa_drop),
+       GBENU_STATS_P6(ale_unknown_ucast),
+       GBENU_STATS_P6(ale_unknown_ucast_bytes),
+       GBENU_STATS_P6(ale_unknown_mcast),
+       GBENU_STATS_P6(ale_unknown_mcast_bytes),
+       GBENU_STATS_P6(ale_unknown_bcast),
+       GBENU_STATS_P6(ale_unknown_bcast_bytes),
+       GBENU_STATS_P6(tx_mem_protect_err),
+       /* GBENU Module 7 */
+       GBENU_STATS_P7(rx_good_frames),
+       GBENU_STATS_P7(rx_broadcast_frames),
+       GBENU_STATS_P7(rx_multicast_frames),
+       GBENU_STATS_P7(rx_pause_frames),
+       GBENU_STATS_P7(rx_crc_errors),
+       GBENU_STATS_P7(rx_align_code_errors),
+       GBENU_STATS_P7(rx_oversized_frames),
+       GBENU_STATS_P7(rx_jabber_frames),
+       GBENU_STATS_P7(rx_undersized_frames),
+       GBENU_STATS_P7(rx_fragments),
+       GBENU_STATS_P7(ale_drop),
+       GBENU_STATS_P7(ale_overrun_drop),
+       GBENU_STATS_P7(rx_bytes),
+       GBENU_STATS_P7(tx_good_frames),
+       GBENU_STATS_P7(tx_broadcast_frames),
+       GBENU_STATS_P7(tx_multicast_frames),
+       GBENU_STATS_P7(tx_pause_frames),
+       GBENU_STATS_P7(tx_deferred_frames),
+       GBENU_STATS_P7(tx_collision_frames),
+       GBENU_STATS_P7(tx_single_coll_frames),
+       GBENU_STATS_P7(tx_mult_coll_frames),
+       GBENU_STATS_P7(tx_excessive_collisions),
+       GBENU_STATS_P7(tx_late_collisions),
+       GBENU_STATS_P7(rx_ipg_error),
+       GBENU_STATS_P7(tx_carrier_sense_errors),
+       GBENU_STATS_P7(tx_bytes),
+       GBENU_STATS_P7(tx_64B_frames),
+       GBENU_STATS_P7(tx_65_to_127B_frames),
+       GBENU_STATS_P7(tx_128_to_255B_frames),
+       GBENU_STATS_P7(tx_256_to_511B_frames),
+       GBENU_STATS_P7(tx_512_to_1023B_frames),
+       GBENU_STATS_P7(tx_1024B_frames),
+       GBENU_STATS_P7(net_bytes),
+       GBENU_STATS_P7(rx_bottom_fifo_drop),
+       GBENU_STATS_P7(rx_port_mask_drop),
+       GBENU_STATS_P7(rx_top_fifo_drop),
+       GBENU_STATS_P7(ale_rate_limit_drop),
+       GBENU_STATS_P7(ale_vid_ingress_drop),
+       GBENU_STATS_P7(ale_da_eq_sa_drop),
+       GBENU_STATS_P7(ale_unknown_ucast),
+       GBENU_STATS_P7(ale_unknown_ucast_bytes),
+       GBENU_STATS_P7(ale_unknown_mcast),
+       GBENU_STATS_P7(ale_unknown_mcast_bytes),
+       GBENU_STATS_P7(ale_unknown_bcast),
+       GBENU_STATS_P7(ale_unknown_bcast_bytes),
+       GBENU_STATS_P7(tx_mem_protect_err),
+       /* GBENU Module 8 */
+       GBENU_STATS_P8(rx_good_frames),
+       GBENU_STATS_P8(rx_broadcast_frames),
+       GBENU_STATS_P8(rx_multicast_frames),
+       GBENU_STATS_P8(rx_pause_frames),
+       GBENU_STATS_P8(rx_crc_errors),
+       GBENU_STATS_P8(rx_align_code_errors),
+       GBENU_STATS_P8(rx_oversized_frames),
+       GBENU_STATS_P8(rx_jabber_frames),
+       GBENU_STATS_P8(rx_undersized_frames),
+       GBENU_STATS_P8(rx_fragments),
+       GBENU_STATS_P8(ale_drop),
+       GBENU_STATS_P8(ale_overrun_drop),
+       GBENU_STATS_P8(rx_bytes),
+       GBENU_STATS_P8(tx_good_frames),
+       GBENU_STATS_P8(tx_broadcast_frames),
+       GBENU_STATS_P8(tx_multicast_frames),
+       GBENU_STATS_P8(tx_pause_frames),
+       GBENU_STATS_P8(tx_deferred_frames),
+       GBENU_STATS_P8(tx_collision_frames),
+       GBENU_STATS_P8(tx_single_coll_frames),
+       GBENU_STATS_P8(tx_mult_coll_frames),
+       GBENU_STATS_P8(tx_excessive_collisions),
+       GBENU_STATS_P8(tx_late_collisions),
+       GBENU_STATS_P8(rx_ipg_error),
+       GBENU_STATS_P8(tx_carrier_sense_errors),
+       GBENU_STATS_P8(tx_bytes),
+       GBENU_STATS_P8(tx_64B_frames),
+       GBENU_STATS_P8(tx_65_to_127B_frames),
+       GBENU_STATS_P8(tx_128_to_255B_frames),
+       GBENU_STATS_P8(tx_256_to_511B_frames),
+       GBENU_STATS_P8(tx_512_to_1023B_frames),
+       GBENU_STATS_P8(tx_1024B_frames),
+       GBENU_STATS_P8(net_bytes),
+       GBENU_STATS_P8(rx_bottom_fifo_drop),
+       GBENU_STATS_P8(rx_port_mask_drop),
+       GBENU_STATS_P8(rx_top_fifo_drop),
+       GBENU_STATS_P8(ale_rate_limit_drop),
+       GBENU_STATS_P8(ale_vid_ingress_drop),
+       GBENU_STATS_P8(ale_da_eq_sa_drop),
+       GBENU_STATS_P8(ale_unknown_ucast),
+       GBENU_STATS_P8(ale_unknown_ucast_bytes),
+       GBENU_STATS_P8(ale_unknown_mcast),
+       GBENU_STATS_P8(ale_unknown_mcast_bytes),
+       GBENU_STATS_P8(ale_unknown_bcast),
+       GBENU_STATS_P8(ale_unknown_bcast_bytes),
+       GBENU_STATS_P8(tx_mem_protect_err),
+};
+
+#define XGBE_STATS0_INFO(field)                                \
+{                                                      \
+       "GBE_0:"#field, XGBE_STATS0_MODULE,             \
+       FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
+       offsetof(struct xgbe_hw_stats, field)           \
+}
+
+#define XGBE_STATS1_INFO(field)                                \
+{                                                      \
+       "GBE_1:"#field, XGBE_STATS1_MODULE,             \
+       FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
+       offsetof(struct xgbe_hw_stats, field)           \
+}
+
+#define XGBE_STATS2_INFO(field)                                \
+{                                                      \
+       "GBE_2:"#field, XGBE_STATS2_MODULE,             \
+       FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
+       offsetof(struct xgbe_hw_stats, field)           \
+}
 
 static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
        /* GBE module 0 */
-       {XGBE_STATS0_INFO(rx_good_frames)},
-       {XGBE_STATS0_INFO(rx_broadcast_frames)},
-       {XGBE_STATS0_INFO(rx_multicast_frames)},
-       {XGBE_STATS0_INFO(rx_oversized_frames)},
-       {XGBE_STATS0_INFO(rx_undersized_frames)},
-       {XGBE_STATS0_INFO(overrun_type4)},
-       {XGBE_STATS0_INFO(overrun_type5)},
-       {XGBE_STATS0_INFO(rx_bytes)},
-       {XGBE_STATS0_INFO(tx_good_frames)},
-       {XGBE_STATS0_INFO(tx_broadcast_frames)},
-       {XGBE_STATS0_INFO(tx_multicast_frames)},
-       {XGBE_STATS0_INFO(tx_bytes)},
-       {XGBE_STATS0_INFO(tx_64byte_frames)},
-       {XGBE_STATS0_INFO(tx_65_to_127byte_frames)},
-       {XGBE_STATS0_INFO(tx_128_to_255byte_frames)},
-       {XGBE_STATS0_INFO(tx_256_to_511byte_frames)},
-       {XGBE_STATS0_INFO(tx_512_to_1023byte_frames)},
-       {XGBE_STATS0_INFO(tx_1024byte_frames)},
-       {XGBE_STATS0_INFO(net_bytes)},
-       {XGBE_STATS0_INFO(rx_sof_overruns)},
-       {XGBE_STATS0_INFO(rx_mof_overruns)},
-       {XGBE_STATS0_INFO(rx_dma_overruns)},
+       XGBE_STATS0_INFO(rx_good_frames),
+       XGBE_STATS0_INFO(rx_broadcast_frames),
+       XGBE_STATS0_INFO(rx_multicast_frames),
+       XGBE_STATS0_INFO(rx_oversized_frames),
+       XGBE_STATS0_INFO(rx_undersized_frames),
+       XGBE_STATS0_INFO(overrun_type4),
+       XGBE_STATS0_INFO(overrun_type5),
+       XGBE_STATS0_INFO(rx_bytes),
+       XGBE_STATS0_INFO(tx_good_frames),
+       XGBE_STATS0_INFO(tx_broadcast_frames),
+       XGBE_STATS0_INFO(tx_multicast_frames),
+       XGBE_STATS0_INFO(tx_bytes),
+       XGBE_STATS0_INFO(tx_64byte_frames),
+       XGBE_STATS0_INFO(tx_65_to_127byte_frames),
+       XGBE_STATS0_INFO(tx_128_to_255byte_frames),
+       XGBE_STATS0_INFO(tx_256_to_511byte_frames),
+       XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
+       XGBE_STATS0_INFO(tx_1024byte_frames),
+       XGBE_STATS0_INFO(net_bytes),
+       XGBE_STATS0_INFO(rx_sof_overruns),
+       XGBE_STATS0_INFO(rx_mof_overruns),
+       XGBE_STATS0_INFO(rx_dma_overruns),
        /* XGBE module 1 */
-       {XGBE_STATS1_INFO(rx_good_frames)},
-       {XGBE_STATS1_INFO(rx_broadcast_frames)},
-       {XGBE_STATS1_INFO(rx_multicast_frames)},
-       {XGBE_STATS1_INFO(rx_pause_frames)},
-       {XGBE_STATS1_INFO(rx_crc_errors)},
-       {XGBE_STATS1_INFO(rx_align_code_errors)},
-       {XGBE_STATS1_INFO(rx_oversized_frames)},
-       {XGBE_STATS1_INFO(rx_jabber_frames)},
-       {XGBE_STATS1_INFO(rx_undersized_frames)},
-       {XGBE_STATS1_INFO(rx_fragments)},
-       {XGBE_STATS1_INFO(overrun_type4)},
-       {XGBE_STATS1_INFO(overrun_type5)},
-       {XGBE_STATS1_INFO(rx_bytes)},
-       {XGBE_STATS1_INFO(tx_good_frames)},
-       {XGBE_STATS1_INFO(tx_broadcast_frames)},
-       {XGBE_STATS1_INFO(tx_multicast_frames)},
-       {XGBE_STATS1_INFO(tx_pause_frames)},
-       {XGBE_STATS1_INFO(tx_deferred_frames)},
-       {XGBE_STATS1_INFO(tx_collision_frames)},
-       {XGBE_STATS1_INFO(tx_single_coll_frames)},
-       {XGBE_STATS1_INFO(tx_mult_coll_frames)},
-       {XGBE_STATS1_INFO(tx_excessive_collisions)},
-       {XGBE_STATS1_INFO(tx_late_collisions)},
-       {XGBE_STATS1_INFO(tx_underrun)},
-       {XGBE_STATS1_INFO(tx_carrier_sense_errors)},
-       {XGBE_STATS1_INFO(tx_bytes)},
-       {XGBE_STATS1_INFO(tx_64byte_frames)},
-       {XGBE_STATS1_INFO(tx_65_to_127byte_frames)},
-       {XGBE_STATS1_INFO(tx_128_to_255byte_frames)},
-       {XGBE_STATS1_INFO(tx_256_to_511byte_frames)},
-       {XGBE_STATS1_INFO(tx_512_to_1023byte_frames)},
-       {XGBE_STATS1_INFO(tx_1024byte_frames)},
-       {XGBE_STATS1_INFO(net_bytes)},
-       {XGBE_STATS1_INFO(rx_sof_overruns)},
-       {XGBE_STATS1_INFO(rx_mof_overruns)},
-       {XGBE_STATS1_INFO(rx_dma_overruns)},
+       XGBE_STATS1_INFO(rx_good_frames),
+       XGBE_STATS1_INFO(rx_broadcast_frames),
+       XGBE_STATS1_INFO(rx_multicast_frames),
+       XGBE_STATS1_INFO(rx_pause_frames),
+       XGBE_STATS1_INFO(rx_crc_errors),
+       XGBE_STATS1_INFO(rx_align_code_errors),
+       XGBE_STATS1_INFO(rx_oversized_frames),
+       XGBE_STATS1_INFO(rx_jabber_frames),
+       XGBE_STATS1_INFO(rx_undersized_frames),
+       XGBE_STATS1_INFO(rx_fragments),
+       XGBE_STATS1_INFO(overrun_type4),
+       XGBE_STATS1_INFO(overrun_type5),
+       XGBE_STATS1_INFO(rx_bytes),
+       XGBE_STATS1_INFO(tx_good_frames),
+       XGBE_STATS1_INFO(tx_broadcast_frames),
+       XGBE_STATS1_INFO(tx_multicast_frames),
+       XGBE_STATS1_INFO(tx_pause_frames),
+       XGBE_STATS1_INFO(tx_deferred_frames),
+       XGBE_STATS1_INFO(tx_collision_frames),
+       XGBE_STATS1_INFO(tx_single_coll_frames),
+       XGBE_STATS1_INFO(tx_mult_coll_frames),
+       XGBE_STATS1_INFO(tx_excessive_collisions),
+       XGBE_STATS1_INFO(tx_late_collisions),
+       XGBE_STATS1_INFO(tx_underrun),
+       XGBE_STATS1_INFO(tx_carrier_sense_errors),
+       XGBE_STATS1_INFO(tx_bytes),
+       XGBE_STATS1_INFO(tx_64byte_frames),
+       XGBE_STATS1_INFO(tx_65_to_127byte_frames),
+       XGBE_STATS1_INFO(tx_128_to_255byte_frames),
+       XGBE_STATS1_INFO(tx_256_to_511byte_frames),
+       XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
+       XGBE_STATS1_INFO(tx_1024byte_frames),
+       XGBE_STATS1_INFO(net_bytes),
+       XGBE_STATS1_INFO(rx_sof_overruns),
+       XGBE_STATS1_INFO(rx_mof_overruns),
+       XGBE_STATS1_INFO(rx_dma_overruns),
        /* XGBE module 2 */
-       {XGBE_STATS2_INFO(rx_good_frames)},
-       {XGBE_STATS2_INFO(rx_broadcast_frames)},
-       {XGBE_STATS2_INFO(rx_multicast_frames)},
-       {XGBE_STATS2_INFO(rx_pause_frames)},
-       {XGBE_STATS2_INFO(rx_crc_errors)},
-       {XGBE_STATS2_INFO(rx_align_code_errors)},
-       {XGBE_STATS2_INFO(rx_oversized_frames)},
-       {XGBE_STATS2_INFO(rx_jabber_frames)},
-       {XGBE_STATS2_INFO(rx_undersized_frames)},
-       {XGBE_STATS2_INFO(rx_fragments)},
-       {XGBE_STATS2_INFO(overrun_type4)},
-       {XGBE_STATS2_INFO(overrun_type5)},
-       {XGBE_STATS2_INFO(rx_bytes)},
-       {XGBE_STATS2_INFO(tx_good_frames)},
-       {XGBE_STATS2_INFO(tx_broadcast_frames)},
-       {XGBE_STATS2_INFO(tx_multicast_frames)},
-       {XGBE_STATS2_INFO(tx_pause_frames)},
-       {XGBE_STATS2_INFO(tx_deferred_frames)},
-       {XGBE_STATS2_INFO(tx_collision_frames)},
-       {XGBE_STATS2_INFO(tx_single_coll_frames)},
-       {XGBE_STATS2_INFO(tx_mult_coll_frames)},
-       {XGBE_STATS2_INFO(tx_excessive_collisions)},
-       {XGBE_STATS2_INFO(tx_late_collisions)},
-       {XGBE_STATS2_INFO(tx_underrun)},
-       {XGBE_STATS2_INFO(tx_carrier_sense_errors)},
-       {XGBE_STATS2_INFO(tx_bytes)},
-       {XGBE_STATS2_INFO(tx_64byte_frames)},
-       {XGBE_STATS2_INFO(tx_65_to_127byte_frames)},
-       {XGBE_STATS2_INFO(tx_128_to_255byte_frames)},
-       {XGBE_STATS2_INFO(tx_256_to_511byte_frames)},
-       {XGBE_STATS2_INFO(tx_512_to_1023byte_frames)},
-       {XGBE_STATS2_INFO(tx_1024byte_frames)},
-       {XGBE_STATS2_INFO(net_bytes)},
-       {XGBE_STATS2_INFO(rx_sof_overruns)},
-       {XGBE_STATS2_INFO(rx_mof_overruns)},
-       {XGBE_STATS2_INFO(rx_dma_overruns)},
+       XGBE_STATS2_INFO(rx_good_frames),
+       XGBE_STATS2_INFO(rx_broadcast_frames),
+       XGBE_STATS2_INFO(rx_multicast_frames),
+       XGBE_STATS2_INFO(rx_pause_frames),
+       XGBE_STATS2_INFO(rx_crc_errors),
+       XGBE_STATS2_INFO(rx_align_code_errors),
+       XGBE_STATS2_INFO(rx_oversized_frames),
+       XGBE_STATS2_INFO(rx_jabber_frames),
+       XGBE_STATS2_INFO(rx_undersized_frames),
+       XGBE_STATS2_INFO(rx_fragments),
+       XGBE_STATS2_INFO(overrun_type4),
+       XGBE_STATS2_INFO(overrun_type5),
+       XGBE_STATS2_INFO(rx_bytes),
+       XGBE_STATS2_INFO(tx_good_frames),
+       XGBE_STATS2_INFO(tx_broadcast_frames),
+       XGBE_STATS2_INFO(tx_multicast_frames),
+       XGBE_STATS2_INFO(tx_pause_frames),
+       XGBE_STATS2_INFO(tx_deferred_frames),
+       XGBE_STATS2_INFO(tx_collision_frames),
+       XGBE_STATS2_INFO(tx_single_coll_frames),
+       XGBE_STATS2_INFO(tx_mult_coll_frames),
+       XGBE_STATS2_INFO(tx_excessive_collisions),
+       XGBE_STATS2_INFO(tx_late_collisions),
+       XGBE_STATS2_INFO(tx_underrun),
+       XGBE_STATS2_INFO(tx_carrier_sense_errors),
+       XGBE_STATS2_INFO(tx_bytes),
+       XGBE_STATS2_INFO(tx_64byte_frames),
+       XGBE_STATS2_INFO(tx_65_to_127byte_frames),
+       XGBE_STATS2_INFO(tx_128_to_255byte_frames),
+       XGBE_STATS2_INFO(tx_256_to_511byte_frames),
+       XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
+       XGBE_STATS2_INFO(tx_1024byte_frames),
+       XGBE_STATS2_INFO(net_bytes),
+       XGBE_STATS2_INFO(rx_sof_overruns),
+       XGBE_STATS2_INFO(rx_mof_overruns),
+       XGBE_STATS2_INFO(rx_dma_overruns),
 };
 
 #define for_each_intf(i, priv) \
@@ -1066,9 +1796,16 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
        if (!slave->open)
                return;
 
-       if (!SLAVE_LINK_IS_XGMII(slave))
-               sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp),
-                                                            sp);
+       if (!SLAVE_LINK_IS_XGMII(slave)) {
+               if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+                       sgmii_link_state =
+                               netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
+               else
+                       sgmii_link_state =
+                               netcp_sgmii_get_port_link(
+                                               gbe_dev->sgmii_port_regs, sp);
+       }
+
        phy_link_state = gbe_phy_link_status(slave);
        link_state = phy_link_state & sgmii_link_state;
 
@@ -1137,6 +1874,7 @@ static int gbe_port_reset(struct gbe_slave *slave)
 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
                            int max_rx_len)
 {
+       void __iomem *rx_maxlen_reg;
        u32 xgmii_mode;
 
        if (max_rx_len > NETCP_MAX_FRAME_SIZE)
@@ -1150,7 +1888,12 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
                writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
        }
 
-       writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen));
+       if (IS_SS_ID_MU(gbe_dev))
+               rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
+       else
+               rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
+
+       writel(max_rx_len, rx_maxlen_reg);
        writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
 }
 
@@ -1242,6 +1985,12 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
 static void gbe_init_host_port(struct gbe_priv *priv)
 {
        int bypass_en = 1;
+
+       /* Host Tx Pri */
+       if (IS_SS_ID_NU(priv))
+               writel(HOST_TX_PRI_MAP_DEFAULT,
+                      GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
+
        /* Max length register */
        writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
                                                  rx_maxlen));
@@ -1472,15 +2221,21 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
                GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
                GBE_RTL_VERSION(reg), GBE_IDENT(reg));
 
+       /* For 10G and on NetCP 1.5, use directed to port */
+       if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
+               gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
+
        if (gbe_dev->enable_ale)
-               gbe_intf->tx_pipe.dma_psflags = 0;
+               gbe_intf->tx_pipe.switch_to_port = 0;
        else
-               gbe_intf->tx_pipe.dma_psflags = port_num;
+               gbe_intf->tx_pipe.switch_to_port = port_num;
 
-       dev_dbg(gbe_dev->dev, "opened TX channel %s: %p with psflags %d\n",
+       dev_dbg(gbe_dev->dev,
+               "opened TX channel %s: %p with to port %d, flags %d\n",
                gbe_intf->tx_pipe.dma_chan_name,
                gbe_intf->tx_pipe.dma_channel,
-               gbe_intf->tx_pipe.dma_psflags);
+               gbe_intf->tx_pipe.switch_to_port,
+               gbe_intf->tx_pipe.flags);
 
        gbe_slave_stop(gbe_intf);
 
@@ -1491,8 +2246,8 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
        writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
 
        /* All statistics enabled and STAT AB visible by default */
-       writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs,
-                                                        stat_port_en));
+       writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
+                                                   stat_port_en));
 
        ret = gbe_slave_open(gbe_intf);
        if (ret)
@@ -1529,6 +2284,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
 {
        int port_reg_num;
        u32 port_reg_ofs, emac_reg_ofs;
+       u32 port_reg_blk_sz, emac_reg_blk_sz;
 
        if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
                dev_err(gbe_dev->dev, "missing slave-port parameter\n");
@@ -1560,23 +2316,29 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
                } else {
                        port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
                }
+               emac_reg_ofs = GBE13_EMAC_OFFSET;
+               port_reg_blk_sz = 0x30;
+               emac_reg_blk_sz = 0x40;
+       } else if (IS_SS_ID_MU(gbe_dev)) {
+               port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
+               emac_reg_ofs = GBENU_EMAC_OFFSET;
+               port_reg_blk_sz = 0x1000;
+               emac_reg_blk_sz = 0x1000;
        } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
                port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
+               emac_reg_ofs = XGBE10_EMAC_OFFSET;
+               port_reg_blk_sz = 0x30;
+               emac_reg_blk_sz = 0x40;
        } else {
                dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
                        gbe_dev->ss_version);
                return -EINVAL;
        }
 
-       if (gbe_dev->ss_version == GBE_SS_VERSION_14)
-               emac_reg_ofs = GBE13_EMAC_OFFSET;
-       else if (gbe_dev->ss_version == XGBE_SS_VERSION_10)
-               emac_reg_ofs = XGBE10_EMAC_OFFSET;
-
-       slave->port_regs = gbe_dev->ss_regs + port_reg_ofs +
-                               (0x30 * port_reg_num);
-       slave->emac_regs = gbe_dev->ss_regs + emac_reg_ofs +
-                               (0x40 * slave->slave_num);
+       slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
+                               (port_reg_blk_sz * port_reg_num);
+       slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
+                               (emac_reg_blk_sz * slave->slave_num);
 
        if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
                /* Initialize  slave port register offsets */
@@ -1595,6 +2357,23 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
                GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
                GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
 
+       } else if (IS_SS_ID_MU(gbe_dev)) {
+               /* Initialize  slave port register offsets */
+               GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
+               GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
+               GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
+               GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
+               GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
+               GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+               GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
+               GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+               GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
+               GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
+
+               /* Initialize EMAC register offsets */
+               GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
+               GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
+
        } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
                /* Initialize  slave port register offsets */
                XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
@@ -1654,6 +2433,8 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
                        mac_phy_link = true;
 
                slave->open = true;
+               if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
+                       break;
        }
 
        /* of_phy_connect() is needed only for MAC-PHY interface */
@@ -1724,24 +2505,41 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
        void __iomem *regs;
        int ret, i;
 
-       ret = of_address_to_resource(node, 0, &res);
+       ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
        if (ret) {
-               dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe subsystem regs\n",
-                       node->name);
+               dev_err(gbe_dev->dev,
+                       "Can't xlate xgbe of node(%s) ss address at %d\n",
+                       node->name, XGBE_SS_REG_INDEX);
                return ret;
        }
 
        regs = devm_ioremap_resource(gbe_dev->dev, &res);
        if (IS_ERR(regs)) {
-               dev_err(gbe_dev->dev, "Failed to map xgbe register base\n");
+               dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
                return PTR_ERR(regs);
        }
        gbe_dev->ss_regs = regs;
 
+       ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
+       if (ret) {
+               dev_err(gbe_dev->dev,
+                       "Can't xlate xgbe of node(%s) sm address at %d\n",
+                       node->name, XGBE_SM_REG_INDEX);
+               return ret;
+       }
+
+       regs = devm_ioremap_resource(gbe_dev->dev, &res);
+       if (IS_ERR(regs)) {
+               dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
+               return PTR_ERR(regs);
+       }
+       gbe_dev->switch_regs = regs;
+
        ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
        if (ret) {
-               dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe serdes regs\n",
-                       node->name);
+               dev_err(gbe_dev->dev,
+                       "Can't xlate xgbe serdes of node(%s) address at %d\n",
+                       node->name, XGBE_SERDES_REG_INDEX);
                return ret;
        }
 
@@ -1753,9 +2551,9 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
        gbe_dev->xgbe_serdes_regs = regs;
 
        gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-                                         XGBE10_NUM_STAT_ENTRIES *
-                                         (XGBE10_NUM_SLAVES + 1) * sizeof(u64),
-                                         GFP_KERNEL);
+                                 XGBE10_NUM_STAT_ENTRIES *
+                                 (gbe_dev->max_num_ports) * sizeof(u64),
+                                 GFP_KERNEL);
        if (!gbe_dev->hw_stats) {
                dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
                return -ENOMEM;
@@ -1764,19 +2562,19 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
        gbe_dev->ss_version = XGBE_SS_VERSION_10;
        gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
                                        XGBE10_SGMII_MODULE_OFFSET;
-       gbe_dev->switch_regs = gbe_dev->ss_regs + XGBE10_SWITCH_MODULE_OFFSET;
        gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
 
-       for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++)
-               gbe_dev->hw_stats_regs[i] = gbe_dev->ss_regs +
+       for (i = 0; i < gbe_dev->max_num_ports; i++)
+               gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
                        XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
 
-       gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET;
-       gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS;
+       gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
+       gbe_dev->ale_ports = gbe_dev->max_num_ports;
        gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
        gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
        gbe_dev->et_stats = xgbe10_et_stats;
        gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+       gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
        /* Subsystem registers */
        XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
@@ -1803,10 +2601,11 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
        void __iomem *regs;
        int ret;
 
-       ret = of_address_to_resource(node, 0, &res);
+       ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
        if (ret) {
-               dev_err(gbe_dev->dev, "Can't translate of node(%s) address\n",
-                       node->name);
+               dev_err(gbe_dev->dev,
+                       "Can't translate of node(%s) of gbe ss address at %d\n",
+                       node->name, GBE_SS_REG_INDEX);
                return ret;
        }
 
@@ -1823,34 +2622,67 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
                                struct device_node *node)
 {
+       struct resource res;
        void __iomem *regs;
-       int i;
+       int i, ret;
+
+       ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
+       if (ret) {
+               dev_err(gbe_dev->dev,
+                       "Can't translate of gbe node(%s) address at index %d\n",
+                       node->name, GBE_SGMII34_REG_INDEX);
+               return ret;
+       }
+
+       regs = devm_ioremap_resource(gbe_dev->dev, &res);
+       if (IS_ERR(regs)) {
+               dev_err(gbe_dev->dev,
+                       "Failed to map gbe sgmii port34 register base\n");
+               return PTR_ERR(regs);
+       }
+       gbe_dev->sgmii_port34_regs = regs;
+
+       ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
+       if (ret) {
+               dev_err(gbe_dev->dev,
+                       "Can't translate of gbe node(%s) address at index %d\n",
+                       node->name, GBE_SM_REG_INDEX);
+               return ret;
+       }
+
+       regs = devm_ioremap_resource(gbe_dev->dev, &res);
+       if (IS_ERR(regs)) {
+               dev_err(gbe_dev->dev,
+                       "Failed to map gbe switch module register base\n");
+               return PTR_ERR(regs);
+       }
+       gbe_dev->switch_regs = regs;
 
        gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
                                          GBE13_NUM_HW_STAT_ENTRIES *
-                                         GBE13_NUM_SLAVES * sizeof(u64),
+                                         gbe_dev->max_num_slaves * sizeof(u64),
                                          GFP_KERNEL);
        if (!gbe_dev->hw_stats) {
                dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
                return -ENOMEM;
        }
 
-       regs = gbe_dev->ss_regs;
-       gbe_dev->sgmii_port_regs = regs + GBE13_SGMII_MODULE_OFFSET;
-       gbe_dev->sgmii_port34_regs = regs + GBE13_SGMII34_MODULE_OFFSET;
-       gbe_dev->switch_regs = regs + GBE13_SWITCH_MODULE_OFFSET;
-       gbe_dev->host_port_regs = regs + GBE13_HOST_PORT_OFFSET;
+       gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
+       gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
 
-       for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++)
-               gbe_dev->hw_stats_regs[i] = regs + GBE13_HW_STATS_OFFSET +
-                               (GBE_HW_STATS_REG_MAP_SZ * i);
+       for (i = 0; i < gbe_dev->max_num_slaves; i++) {
+               gbe_dev->hw_stats_regs[i] =
+                       gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
+                       (GBE_HW_STATS_REG_MAP_SZ * i);
+       }
 
-       gbe_dev->ale_reg = regs + GBE13_ALE_OFFSET;
-       gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS;
+       gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
+       gbe_dev->ale_ports = gbe_dev->max_num_ports;
        gbe_dev->host_port = GBE13_HOST_PORT_NUM;
        gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
        gbe_dev->et_stats = gbe13_et_stats;
        gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+       gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
 
        /* Subsystem registers */
        GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
@@ -1869,6 +2701,80 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
        return 0;
 }
 
+static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
+                               struct device_node *node)
+{
+       struct resource res;
+       void __iomem *regs;
+       int i, ret;
+
+       gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
+                                 GBENU_NUM_HW_STAT_ENTRIES *
+                                 (gbe_dev->max_num_ports) * sizeof(u64),
+                                 GFP_KERNEL);
+       if (!gbe_dev->hw_stats) {
+               dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+               return -ENOMEM;
+       }
+
+       ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
+       if (ret) {
+               dev_err(gbe_dev->dev,
+                       "Can't translate of gbenu node(%s) addr at index %d\n",
+                       node->name, GBENU_SM_REG_INDEX);
+               return ret;
+       }
+
+       regs = devm_ioremap_resource(gbe_dev->dev, &res);
+       if (IS_ERR(regs)) {
+               dev_err(gbe_dev->dev,
+                       "Failed to map gbenu switch module register base\n");
+               return PTR_ERR(regs);
+       }
+       gbe_dev->switch_regs = regs;
+
+       gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
+       gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
+
+       for (i = 0; i < (gbe_dev->max_num_ports); i++)
+               gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
+                       GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
+
+       gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
+       gbe_dev->ale_ports = gbe_dev->max_num_ports;
+       gbe_dev->host_port = GBENU_HOST_PORT_NUM;
+       gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
+       gbe_dev->et_stats = gbenu_et_stats;
+       gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
+
+       if (IS_SS_ID_NU(gbe_dev))
+               gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+                       (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+       else
+               gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+                                       GBENU_ET_STATS_PORT_SIZE;
+
+       /* Subsystem registers */
+       GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+
+       /* Switch module registers */
+       GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+       GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
+       GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+       GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+
+       /* Host port registers */
+       GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+       GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+
+       /* For NU only.  2U does not need tx_pri_map.
+        * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
+        * while 2U has only 1 such thread
+        */
+       GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
+       return 0;
+}
+
 static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
                     struct device_node *node, void **inst_priv)
 {
@@ -1888,6 +2794,21 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        if (!gbe_dev)
                return -ENOMEM;
 
+       if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
+           of_device_is_compatible(node, "ti,netcp-gbe")) {
+               gbe_dev->max_num_slaves = 4;
+       } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
+               gbe_dev->max_num_slaves = 8;
+       } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
+               gbe_dev->max_num_slaves = 1;
+       } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
+               gbe_dev->max_num_slaves = 2;
+       } else {
+               dev_err(dev, "device tree node for unknown device\n");
+               return -EINVAL;
+       }
+       gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
+
        gbe_dev->dev = dev;
        gbe_dev->netcp_device = netcp_device;
        gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
@@ -1923,7 +2844,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
                if (ret)
                        goto quit;
 
-               ret = set_gbe_ethss14_priv(gbe_dev, node);
+               dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
+
+               if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+                       ret = set_gbe_ethss14_priv(gbe_dev, node);
+               else if (IS_SS_ID_MU(gbe_dev))
+                       ret = set_gbenu_ethss_priv(gbe_dev, node);
+               else
+                       ret = -ENODEV;
+
                if (ret)
                        goto quit;
        } else if (!strcmp(node->name, "xgbe")) {
@@ -1963,6 +2892,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
                        continue;
                }
                gbe_dev->num_slaves++;
+               if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
+                       break;
        }
 
        if (!gbe_dev->num_slaves)
@@ -1971,7 +2902,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        /* Initialize Secondary slave ports */
        secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
        INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
-       if (secondary_ports)
+       if (secondary_ports && (gbe_dev->num_slaves <  gbe_dev->max_num_slaves))
                init_secondary_ports(gbe_dev, secondary_ports);
        of_node_put(secondary_ports);
 
index bea8cd2bb56cf85b92132ea69ea90f08e6dfc5f7..a789a2054388ea1505e6c43ab5fe47efb73d13bc 100644 (file)
@@ -838,7 +838,8 @@ static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return ret;
 }
 
-static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ptp_mpipe_gettime(struct ptp_clock_info *ptp,
+                            struct timespec64 *ts)
 {
        int ret = 0;
        struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
@@ -850,7 +851,7 @@ static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 }
 
 static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
-                            const struct timespec *ts)
+                            const struct timespec64 *ts)
 {
        int ret = 0;
        struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
@@ -876,8 +877,8 @@ static struct ptp_clock_info ptp_mpipe_caps = {
        .pps            = 0,
        .adjfreq        = ptp_mpipe_adjfreq,
        .adjtime        = ptp_mpipe_adjtime,
-       .gettime        = ptp_mpipe_gettime,
-       .settime        = ptp_mpipe_settime,
+       .gettime64      = ptp_mpipe_gettime,
+       .settime64      = ptp_mpipe_settime,
        .enable         = ptp_mpipe_enable,
 };
 
index bb79928046645d62fbb238614777b00600b62b0d..ac62a5e248b0b011fbdf1c8e1172c70ed0892965 100644 (file)
@@ -1065,7 +1065,7 @@ refill:
 
        /*
         * this call can fail, but for now, just leave this
-        * decriptor without skb
+        * descriptor without skb
         */
        gelic_descr_prepare_rx(card, descr);
 
index 0a7f2e77557f63eb8920c0b80c9552ec17e056ec..13214a6492ac5b1eced4d39c21b7736f5dcf19d4 100644 (file)
@@ -1167,7 +1167,7 @@ static int gelic_wl_set_ap(struct net_device *netdev,
        } else {
                pr_debug("%s: clear bssid\n", __func__);
                clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
-               memset(wl->bssid, 0, ETH_ALEN);
+               eth_zero_addr(wl->bssid);
        }
        spin_unlock_irqrestore(&wl->lock, irqflag);
        pr_debug("%s: ->\n", __func__);
@@ -1189,7 +1189,7 @@ static int gelic_wl_get_ap(struct net_device *netdev,
                memcpy(data->ap_addr.sa_data, wl->active_bssid,
                       ETH_ALEN);
        } else
-               memset(data->ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(data->ap_addr.sa_data);
 
        spin_unlock_irqrestore(&wl->lock, irqflag);
        mutex_unlock(&wl->assoc_stat_lock);
index 17e276651601b27c393001cdd5593cddcd828788..de2850497c09d87a6e7131e12a82ce68af702dd5 100644 (file)
@@ -70,12 +70,14 @@ static const int multicast_filter_limit = 32;
 /* Operational parameters that are set at compile time. */
 
 /* Keep the ring sizes a power of two for compile efficiency.
-   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
-   Making the Tx ring too large decreases the effectiveness of channel
-   bonding and packet priority.
-   There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE   16
-#define TX_QUEUE_LEN   10      /* Limit ring entries actually used. */
+ * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ * Making the Tx ring too large decreases the effectiveness of channel
+ * bonding and packet priority.
+ * With BQL support, we can increase TX ring safely.
+ * There are no ill effects from too-large receive rings.
+ */
+#define TX_RING_SIZE   64
+#define TX_QUEUE_LEN   (TX_RING_SIZE - 6)      /* Limit ring entries actually used. */
 #define RX_RING_SIZE   64
 
 /* Operational parameters that usually are not changed. */
@@ -286,7 +288,7 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
  * The .data field is currently only used to store quirks
  */
 static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
-static struct of_device_id rhine_of_tbl[] = {
+static const struct of_device_id rhine_of_tbl[] = {
        { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
        { }     /* terminate list */
 };
@@ -1295,6 +1297,7 @@ static void alloc_tbufs(struct net_device* dev)
        }
        rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
 
+       netdev_reset_queue(dev);
 }
 
 static void free_tbufs(struct net_device* dev)
@@ -1795,6 +1798,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
        else
                rp->tx_ring[entry].tx_status = 0;
 
+       netdev_sent_queue(dev, skb->len);
        /* lock eth irq */
        wmb();
        rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
@@ -1863,6 +1867,8 @@ static void rhine_tx(struct net_device *dev)
        struct rhine_private *rp = netdev_priv(dev);
        struct device *hwdev = dev->dev.parent;
        int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
+       struct sk_buff *skb;
 
        /* find and cleanup dirty tx descriptors */
        while (rp->dirty_tx != rp->cur_tx) {
@@ -1871,6 +1877,7 @@ static void rhine_tx(struct net_device *dev)
                          entry, txstatus);
                if (txstatus & DescOwn)
                        break;
+               skb = rp->tx_skbuff[entry];
                if (txstatus & 0x8000) {
                        netif_dbg(rp, tx_done, dev,
                                  "Transmit error, Tx status %08x\n", txstatus);
@@ -1899,7 +1906,7 @@ static void rhine_tx(struct net_device *dev)
                                  (txstatus >> 3) & 0xF, txstatus & 0xF);
 
                        u64_stats_update_begin(&rp->tx_stats.syncp);
-                       rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
+                       rp->tx_stats.bytes += skb->len;
                        rp->tx_stats.packets++;
                        u64_stats_update_end(&rp->tx_stats.syncp);
                }
@@ -1907,13 +1914,17 @@ static void rhine_tx(struct net_device *dev)
                if (rp->tx_skbuff_dma[entry]) {
                        dma_unmap_single(hwdev,
                                         rp->tx_skbuff_dma[entry],
-                                        rp->tx_skbuff[entry]->len,
+                                        skb->len,
                                         DMA_TO_DEVICE);
                }
-               dev_consume_skb_any(rp->tx_skbuff[entry]);
+               bytes_compl += skb->len;
+               pkts_compl++;
+               dev_consume_skb_any(skb);
                rp->tx_skbuff[entry] = NULL;
                entry = (++rp->dirty_tx) % TX_RING_SIZE;
        }
+
+       netdev_completed_queue(dev, pkts_compl, bytes_compl);
        if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
                netif_wake_queue(dev);
 }
index c20206f83cc1fdf3445cfc41139724f0dbdbdd92..ae68afd50a156199b6f74b2d2e65420cddd02c73 100644 (file)
@@ -392,7 +392,7 @@ MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
  *     Describe the OF device identifiers that we support in this
  *     device driver. Used for devicetree nodes.
  */
-static struct of_device_id velocity_of_ids[] = {
+static const struct of_device_id velocity_of_ids[] = {
        { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
        { /* Sentinel */ },
 };
index 0e0fbb5842b3d25e0d18ebaedfaeff60b006c1b7..8b282d0b169c4a06e82479b215dbb02fcceb4c1e 100644 (file)
@@ -56,7 +56,7 @@ MODULE_LICENSE("GPL");
 
 #define W5100_S0_REGS          0x0400
 #define W5100_S0_MR            0x0400 /* S0 Mode Register */
-#define   S0_MR_MACRAW           0x04 /* MAC RAW mode (promiscous) */
+#define   S0_MR_MACRAW           0x04 /* MAC RAW mode (promiscuous) */
 #define   S0_MR_MACRAW_MF        0x44 /* MAC RAW mode (filtered) */
 #define W5100_S0_CR            0x0401 /* S0 Command Register */
 #define   S0_CR_OPEN             0x01 /* OPEN command */
index 4b310002258d0742031f4bcb36d395dd992d0a3d..8da7b930ff595aa6ed79ddedbd5461b84dad6003 100644 (file)
@@ -63,7 +63,7 @@ MODULE_LICENSE("GPL");
 #define   IDR_W5300              0x5300  /* =0x5300 for WIZnet W5300 */
 #define W5300_S0_MR            0x0200  /* S0 Mode Register */
 #define   S0_MR_CLOSED           0x0000  /* Close mode */
-#define   S0_MR_MACRAW           0x0004  /* MAC RAW mode (promiscous) */
+#define   S0_MR_MACRAW           0x0004  /* MAC RAW mode (promiscuous) */
 #define   S0_MR_MACRAW_MF        0x0044  /* MAC RAW mode (filtered) */
 #define W5300_S0_CR            0x0202  /* S0 Command Register */
 #define   S0_CR_OPEN             0x0001  /* OPEN command */
index dbcbf0c5bcfa910c49ec81037892a45487aa081e..690a4c36b3166c76b4d8ed63f9d21574ecac8bb3 100644 (file)
@@ -1157,7 +1157,7 @@ static int temac_of_remove(struct platform_device *op)
        return 0;
 }
 
-static struct of_device_id temac_of_match[] = {
+static const struct of_device_id temac_of_match[] = {
        { .compatible = "xlnx,xps-ll-temac-1.01.b", },
        { .compatible = "xlnx,xps-ll-temac-2.00.a", },
        { .compatible = "xlnx,xps-ll-temac-2.02.a", },
index a6d2860b712c732c5459bea14647da9825ff1042..28b7e7d9c272ae672034d10bb64f855f0d1077e9 100644 (file)
@@ -48,7 +48,7 @@
 #define AXIENET_REGS_N         32
 
 /* Match table for of_platform binding */
-static struct of_device_id axienet_of_match[] = {
+static const struct of_device_id axienet_of_match[] = {
        { .compatible = "xlnx,axi-ethernet-1.00.a", },
        { .compatible = "xlnx,axi-ethernet-1.01.a", },
        { .compatible = "xlnx,axi-ethernet-2.01.a", },
index 9d4ce388510a5034b2f29d890645afdda73b23f0..6008eee01a33a7a9e62918627443874eae56813a 100644 (file)
@@ -1062,7 +1062,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
        } else {
                dev_warn(&ofdev->dev, "Parameter %s not found,"
                        "defaulting to false\n", s);
-               return 0;
+               return false;
        }
 }
 
@@ -1231,7 +1231,7 @@ static struct net_device_ops xemaclite_netdev_ops = {
 };
 
 /* Match table for OF platform binding */
-static struct of_device_id xemaclite_of_match[] = {
+static const struct of_device_id xemaclite_of_match[] = {
        { .compatible = "xlnx,opb-ethernetlite-1.01.a", },
        { .compatible = "xlnx,opb-ethernetlite-1.01.b", },
        { .compatible = "xlnx,xps-ethernetlite-1.00.a", },
index 9e16a2819d4850938389c924e18f56184fedd946..5138407941cf1d90f4d4b1f7c6fa9c3ff92f5294 100644 (file)
@@ -954,7 +954,7 @@ static void eth_set_mcast_list(struct net_device *dev)
                return;
        }
 
-       memset(diffs, 0, ETH_ALEN);
+       eth_zero_addr(diffs);
 
        addr = NULL;
        netdev_for_each_mc_addr(ha, dev) {
index daca0dee88f3634570ab6cf984e80066bd331341..7c4a4151ef0f23fca16b0fd957769c09dc5823d0 100644 (file)
@@ -247,6 +247,9 @@ static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct sixpack *sp = netdev_priv(dev);
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        spin_lock_bh(&sp->lock);
        /* We were not busy, so we are now... :-) */
        netif_stop_queue(dev);
@@ -284,18 +287,6 @@ static int sp_close(struct net_device *dev)
        return 0;
 }
 
-/* Return the frame type ID */
-static int sp_header(struct sk_buff *skb, struct net_device *dev,
-                    unsigned short type, const void *daddr,
-                    const void *saddr, unsigned len)
-{
-#ifdef CONFIG_INET
-       if (type != ETH_P_AX25)
-               return ax25_hard_header(skb, dev, type, daddr, saddr, len);
-#endif
-       return 0;
-}
-
 static int sp_set_mac_address(struct net_device *dev, void *addr)
 {
        struct sockaddr_ax25 *sa = addr;
@@ -309,20 +300,6 @@ static int sp_set_mac_address(struct net_device *dev, void *addr)
        return 0;
 }
 
-static int sp_rebuild_header(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
-       return ax25_rebuild_header(skb);
-#else
-       return 0;
-#endif
-}
-
-static const struct header_ops sp_header_ops = {
-       .create         = sp_header,
-       .rebuild        = sp_rebuild_header,
-};
-
 static const struct net_device_ops sp_netdev_ops = {
        .ndo_open               = sp_open_dev,
        .ndo_stop               = sp_close,
@@ -337,7 +314,7 @@ static void sp_setup(struct net_device *dev)
        dev->destructor         = free_netdev;
        dev->mtu                = SIXP_MTU;
        dev->hard_header_len    = AX25_MAX_HEADER_LEN;
-       dev->header_ops         = &sp_header_ops;
+       dev->header_ops         = &ax25_header_ops;
 
        dev->addr_len           = AX25_ADDR_LEN;
        dev->type               = ARPHRD_AX25;
index a98c153f371e761f2c396c7fbcc1bdf787f407f3..83c7cce0d172b205f0b1d96405b6b66eea79ea55 100644 (file)
@@ -772,6 +772,9 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
        struct baycom_state *bc = netdev_priv(dev);
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        if (skb->data[0] != 0) {
                do_kiss_params(bc, skb->data, skb->len);
                dev_kfree_skb(skb);
index c2894e43840e604e75eac9f40bcfb1470e4c47aa..63ff08a26da81b5bafc401fbfeeb710a6b595a64 100644 (file)
@@ -251,6 +251,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
        struct net_device *orig_dev;
        int size;
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        /*
         * Just to be *really* sure not to send anything if the interface
         * is down, the ethernet device may have gone.
index 0fad408f24aa137694156290ae35a1da99343b80..c3d37777061631d1c34fb1b1e05f36986bb5f6cc 100644 (file)
@@ -920,6 +920,9 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
        unsigned long flags;
        int i;
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        /* Temporarily stop the scheduler feeding us packets */
        netif_stop_queue(dev);
 
index c67a27245072746c3275a3b27a4fd899ebb6f3a4..49fe59b180a8619f554d5852202df933fee54871 100644 (file)
@@ -404,6 +404,9 @@ static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb,
 {
        struct hdlcdrv_state *sm = netdev_priv(dev);
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        if (skb->data[0] != 0) {
                do_kiss_params(sm, skb->data, skb->len);
                dev_kfree_skb(skb);
index f990bb1c3e02ba50c583e6d41d3f8472736a3e37..2ffbf13471d09ad4c27d8c70fbb4dd3145befa75 100644 (file)
@@ -529,6 +529,9 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct mkiss *ax = netdev_priv(dev);
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        if (!netif_running(dev))  {
                printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
                return NETDEV_TX_BUSY;
@@ -554,11 +557,9 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        /* We were not busy, so we are now... :-) */
-       if (skb != NULL) {
-               netif_stop_queue(dev);
-               ax_encaps(dev, skb->data, skb->len);
-               kfree_skb(skb);
-       }
+       netif_stop_queue(dev);
+       ax_encaps(dev, skb->data, skb->len);
+       kfree_skb(skb);
 
        return NETDEV_TX_OK;
 }
@@ -573,32 +574,6 @@ static int ax_open_dev(struct net_device *dev)
        return 0;
 }
 
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
-
-/* Return the frame type ID */
-static int ax_header(struct sk_buff *skb, struct net_device *dev,
-                    unsigned short type, const void *daddr,
-                    const void *saddr, unsigned len)
-{
-#ifdef CONFIG_INET
-       if (type != ETH_P_AX25)
-               return ax25_hard_header(skb, dev, type, daddr, saddr, len);
-#endif
-       return 0;
-}
-
-
-static int ax_rebuild_header(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
-       return ax25_rebuild_header(skb);
-#else
-       return 0;
-#endif
-}
-
-#endif /* CONFIG_{AX25,AX25_MODULE} */
-
 /* Open the low-level part of the AX25 channel. Easy! */
 static int ax_open(struct net_device *dev)
 {
@@ -662,11 +637,6 @@ static int ax_close(struct net_device *dev)
        return 0;
 }
 
-static const struct header_ops ax_header_ops = {
-       .create    = ax_header,
-       .rebuild   = ax_rebuild_header,
-};
-
 static const struct net_device_ops ax_netdev_ops = {
        .ndo_open            = ax_open_dev,
        .ndo_stop            = ax_close,
@@ -682,7 +652,7 @@ static void ax_setup(struct net_device *dev)
        dev->addr_len        = 0;
        dev->type            = ARPHRD_AX25;
        dev->tx_queue_len    = 10;
-       dev->header_ops      = &ax_header_ops;
+       dev->header_ops      = &ax25_header_ops;
        dev->netdev_ops      = &ax_netdev_ops;
 
 
index 57be9e0e98a68608fcbafade768bcf397b248e42..ce88df33fe17b20237b3f4f2e0c9e1ca7b4b63a3 100644 (file)
@@ -1639,6 +1639,9 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
        unsigned long flags;
        char kisscmd;
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        if (skb->len > scc->stat.bufsize || skb->len < 2) {
                scc->dev_stat.tx_dropped++;     /* bogus frame */
                dev_kfree_skb(skb);
index 717433cfb81d2c4248539a56516daf6eb09c45ae..1a4729c36aa49d93f8e536cd98d4731df704621d 100644 (file)
@@ -597,6 +597,9 @@ static netdev_tx_t yam_send_packet(struct sk_buff *skb,
 {
        struct yam_port *yp = netdev_priv(dev);
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        skb_queue_tail(&yp->send_queue, skb);
        dev->trans_start = jiffies;
        return NETDEV_TX_OK;
index 384ca4f4de4a0e6ee6b053440937d96a272c4850..309adee6e7910db99cfe2cd08d332c24033c1d15 100644 (file)
@@ -128,9 +128,11 @@ struct ndis_tcp_ip_checksum_info;
 struct hv_netvsc_packet {
        /* Bookkeeping stuff */
        u32 status;
+       bool part_of_skb;
 
        struct hv_device *device;
        bool is_data_pkt;
+       bool xmit_more; /* from skb */
        u16 vlan_tci;
 
        u16 q_idx;
@@ -149,7 +151,7 @@ struct hv_netvsc_packet {
        /* Points to the send/receive buffer where the ethernet frame is */
        void *data;
        u32 page_buf_cnt;
-       struct hv_page_buffer page_buf[0];
+       struct hv_page_buffer *page_buf;
 };
 
 struct netvsc_device_info {
@@ -596,7 +598,16 @@ struct nvsp_message {
 
 #define VRSS_SEND_TAB_SIZE 16
 
-/* Per netvsc channel-specific */
+#define RNDIS_MAX_PKT_DEFAULT 8
+#define RNDIS_PKT_ALIGN_DEFAULT 8
+
+struct multi_send_data {
+       spinlock_t lock; /* protect struct multi_send_data */
+       struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
+       u32 count; /* counter of batched packets */
+};
+
+/* Per netvsc device */
 struct netvsc_device {
        struct hv_device *dev;
 
@@ -634,6 +645,7 @@ struct netvsc_device {
 
        struct vmbus_channel *chn_table[NR_CPUS];
        u32 send_table[VRSS_SEND_TAB_SIZE];
+       u32 max_chn;
        u32 num_chn;
        atomic_t queue_sends[NR_CPUS];
 
@@ -646,6 +658,10 @@ struct netvsc_device {
        unsigned char *cb_buffer;
        /* The sub channel callback buffer */
        unsigned char *sub_cb_buf;
+
+       struct multi_send_data msd[NR_CPUS];
+       u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
+       u32 pkt_align; /* alignment bytes, e.g. 8 */
 };
 
 /* NdisInitialize message */
index 208eb05446baa4a6980620773865e3746a13848f..f69923695b5b1b6fbb886f523203ba86656ac5f6 100644 (file)
@@ -37,6 +37,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
 {
        struct netvsc_device *net_device;
        struct net_device *ndev = hv_get_drvdata(device);
+       int i;
 
        net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
        if (!net_device)
@@ -53,6 +54,11 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
        net_device->destroy = false;
        net_device->dev = device;
        net_device->ndev = ndev;
+       net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
+       net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
+
+       for (i = 0; i < num_online_cpus(); i++)
+               spin_lock_init(&net_device->msd[i].lock);
 
        hv_set_drvdata(device, net_device);
        return net_device;
@@ -687,12 +693,23 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 
 static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
                                   unsigned int section_index,
+                                  u32 pend_size,
                                   struct hv_netvsc_packet *packet)
 {
        char *start = net_device->send_buf;
-       char *dest = (start + (section_index * net_device->send_section_size));
+       char *dest = start + (section_index * net_device->send_section_size)
+                    + pend_size;
        int i;
        u32 msg_size = 0;
+       u32 padding = 0;
+       u32 remain = packet->total_data_buflen % net_device->pkt_align;
+
+       /* Add padding */
+       if (packet->is_data_pkt && packet->xmit_more && remain) {
+               padding = net_device->pkt_align - remain;
+               packet->rndis_msg->msg_len += padding;
+               packet->total_data_buflen += padding;
+       }
 
        for (i = 0; i < packet->page_buf_cnt; i++) {
                char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
@@ -703,67 +720,48 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
                msg_size += len;
                dest += len;
        }
+
+       if (padding) {
+               memset(dest, 0, padding);
+               msg_size += padding;
+       }
+
        return msg_size;
 }
 
-int netvsc_send(struct hv_device *device,
-                       struct hv_netvsc_packet *packet)
+static inline int netvsc_send_pkt(
+       struct hv_netvsc_packet *packet,
+       struct netvsc_device *net_device)
 {
-       struct netvsc_device *net_device;
-       int ret = 0;
-       struct nvsp_message sendMessage;
-       struct net_device *ndev;
-       struct vmbus_channel *out_channel = NULL;
-       u64 req_id;
-       unsigned int section_index = NETVSC_INVALID_INDEX;
-       u32 msg_size = 0;
-       struct sk_buff *skb = NULL;
+       struct nvsp_message nvmsg;
+       struct vmbus_channel *out_channel = packet->channel;
        u16 q_idx = packet->q_idx;
+       struct net_device *ndev = net_device->ndev;
+       u64 req_id;
+       int ret;
 
-
-       net_device = get_outbound_net_device(device);
-       if (!net_device)
-               return -ENODEV;
-       ndev = net_device->ndev;
-
-       sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
+       nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
        if (packet->is_data_pkt) {
                /* 0 is RMC_DATA; */
-               sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
+               nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
        } else {
                /* 1 is RMC_CONTROL; */
-               sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
-       }
-
-       /* Attempt to send via sendbuf */
-       if (packet->total_data_buflen < net_device->send_section_size) {
-               section_index = netvsc_get_next_send_section(net_device);
-               if (section_index != NETVSC_INVALID_INDEX) {
-                       msg_size = netvsc_copy_to_send_buf(net_device,
-                                                          section_index,
-                                                          packet);
-                       skb = (struct sk_buff *)
-                             (unsigned long)packet->send_completion_tid;
-                       packet->page_buf_cnt = 0;
-               }
+               nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
        }
-       packet->send_buf_index = section_index;
 
-
-       sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
-               section_index;
-       sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;
+       nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
+               packet->send_buf_index;
+       if (packet->send_buf_index == NETVSC_INVALID_INDEX)
+               nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
+       else
+               nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
+                       packet->total_data_buflen;
 
        if (packet->send_completion)
                req_id = (ulong)packet;
        else
                req_id = 0;
 
-       out_channel = net_device->chn_table[packet->q_idx];
-       if (out_channel == NULL)
-               out_channel = device->channel;
-       packet->channel = out_channel;
-
        if (out_channel->rescind)
                return -ENODEV;
 
@@ -771,11 +769,12 @@ int netvsc_send(struct hv_device *device,
                ret = vmbus_sendpacket_pagebuffer(out_channel,
                                                  packet->page_buf,
                                                  packet->page_buf_cnt,
-                                                 &sendMessage,
+                                                 &nvmsg,
                                                  sizeof(struct nvsp_message),
                                                  req_id);
        } else {
-               ret = vmbus_sendpacket(out_channel, &sendMessage,
+               ret = vmbus_sendpacket(
+                               out_channel, &nvmsg,
                                sizeof(struct nvsp_message),
                                req_id,
                                VM_PKT_DATA_INBAND,
@@ -809,6 +808,107 @@ int netvsc_send(struct hv_device *device,
                           packet, ret);
        }
 
+       return ret;
+}
+
+int netvsc_send(struct hv_device *device,
+               struct hv_netvsc_packet *packet)
+{
+       struct netvsc_device *net_device;
+       int ret = 0, m_ret = 0;
+       struct vmbus_channel *out_channel;
+       u16 q_idx = packet->q_idx;
+       u32 pktlen = packet->total_data_buflen, msd_len = 0;
+       unsigned int section_index = NETVSC_INVALID_INDEX;
+       struct sk_buff *skb = NULL;
+       unsigned long flag;
+       struct multi_send_data *msdp;
+       struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
+
+       net_device = get_outbound_net_device(device);
+       if (!net_device)
+               return -ENODEV;
+
+       out_channel = net_device->chn_table[q_idx];
+       if (!out_channel) {
+               out_channel = device->channel;
+               q_idx = 0;
+               packet->q_idx = 0;
+       }
+       packet->channel = out_channel;
+       packet->send_buf_index = NETVSC_INVALID_INDEX;
+
+       msdp = &net_device->msd[q_idx];
+
+       /* batch packets in send buffer if possible */
+       spin_lock_irqsave(&msdp->lock, flag);
+       if (msdp->pkt)
+               msd_len = msdp->pkt->total_data_buflen;
+
+       if (packet->is_data_pkt && msd_len > 0 &&
+           msdp->count < net_device->max_pkt &&
+           msd_len + pktlen + net_device->pkt_align <
+           net_device->send_section_size) {
+               section_index = msdp->pkt->send_buf_index;
+
+       } else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
+                  net_device->send_section_size) {
+               section_index = netvsc_get_next_send_section(net_device);
+               if (section_index != NETVSC_INVALID_INDEX) {
+                               msd_send = msdp->pkt;
+                               msdp->pkt = NULL;
+                               msdp->count = 0;
+                               msd_len = 0;
+               }
+       }
+
+       if (section_index != NETVSC_INVALID_INDEX) {
+               netvsc_copy_to_send_buf(net_device,
+                                       section_index, msd_len,
+                                       packet);
+               if (!packet->part_of_skb) {
+                       skb = (struct sk_buff *)
+                               (unsigned long)
+                               packet->send_completion_tid;
+
+                       packet->send_completion_tid = 0;
+               }
+
+               packet->page_buf_cnt = 0;
+               packet->send_buf_index = section_index;
+               packet->total_data_buflen += msd_len;
+
+               kfree(msdp->pkt);
+               if (packet->xmit_more) {
+                       msdp->pkt = packet;
+                       msdp->count++;
+               } else {
+                       cur_send = packet;
+                       msdp->pkt = NULL;
+                       msdp->count = 0;
+               }
+       } else {
+               msd_send = msdp->pkt;
+               msdp->pkt = NULL;
+               msdp->count = 0;
+               cur_send = packet;
+       }
+
+       spin_unlock_irqrestore(&msdp->lock, flag);
+
+       if (msd_send) {
+               m_ret = netvsc_send_pkt(msd_send, net_device);
+
+               if (m_ret != 0) {
+                       netvsc_free_send_slot(net_device,
+                                             msd_send->send_buf_index);
+                       kfree(msd_send);
+               }
+       }
+
+       if (cur_send)
+               ret = netvsc_send_pkt(cur_send, net_device);
+
        if (ret != 0) {
                if (section_index != NETVSC_INVALID_INDEX)
                        netvsc_free_send_slot(net_device, section_index);
index 15d82eda0baf4141465addd6207c6202f09f2543..f9db6bc513e954b0831d881baae5048ad7cc9f87 100644 (file)
@@ -234,11 +234,11 @@ static void netvsc_xmit_completion(void *context)
        struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
        struct sk_buff *skb = (struct sk_buff *)
                (unsigned long)packet->send_completion_tid;
-       u32 index = packet->send_buf_index;
 
-       kfree(packet);
+       if (!packet->part_of_skb)
+               kfree(packet);
 
-       if (skb && (index == NETVSC_INVALID_INDEX))
+       if (skb)
                dev_kfree_skb_any(skb);
 }
 
@@ -384,6 +384,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        u32 net_trans_info;
        u32 hash;
        u32 skb_length = skb->len;
+       u32 head_room = skb_headroom(skb);
+       u32 pkt_sz;
+       struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
 
 
        /* We will at most need two pages to describe the rndis
@@ -398,22 +401,32 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                return NETDEV_TX_OK;
        }
 
-       /* Allocate a netvsc packet based on # of frags. */
-       packet = kzalloc(sizeof(struct hv_netvsc_packet) +
-                        (num_data_pgs * sizeof(struct hv_page_buffer)) +
-                        sizeof(struct rndis_message) +
-                        NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
-                        NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE, GFP_ATOMIC);
-       if (!packet) {
-               /* out of memory, drop packet */
-               netdev_err(net, "unable to allocate hv_netvsc_packet\n");
-
-               dev_kfree_skb(skb);
-               net->stats.tx_dropped++;
-               return NETDEV_TX_OK;
+       pkt_sz = sizeof(struct hv_netvsc_packet) +
+                       sizeof(struct rndis_message) +
+                       NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
+                       NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE;
+
+       if (head_room < pkt_sz) {
+               packet = kmalloc(pkt_sz, GFP_ATOMIC);
+               if (!packet) {
+                       /* out of memory, drop packet */
+                       netdev_err(net, "unable to alloc hv_netvsc_packet\n");
+                       dev_kfree_skb(skb);
+                       net->stats.tx_dropped++;
+                       return NETDEV_TX_OK;
+               }
+               packet->part_of_skb = false;
+       } else {
+               /* Use the headroom for building up the packet */
+               packet = (struct hv_netvsc_packet *)skb->head;
+               packet->part_of_skb = true;
        }
 
+       packet->status = 0;
+       packet->xmit_more = skb->xmit_more;
+
        packet->vlan_tci = skb->vlan_tci;
+       packet->page_buf = page_buf;
 
        packet->q_idx = skb_get_queue_mapping(skb);
 
@@ -421,8 +434,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        packet->total_data_buflen = skb->len;
 
        packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
-                               sizeof(struct hv_netvsc_packet) +
-                               (num_data_pgs * sizeof(struct hv_page_buffer)));
+                               sizeof(struct hv_netvsc_packet));
+
+       memset(packet->rndis_msg, 0, sizeof(struct rndis_message) +
+                                       NDIS_VLAN_PPI_SIZE +
+                                       NDIS_CSUM_PPI_SIZE +
+                                       NDIS_LSO_PPI_SIZE +
+                                       NDIS_HASH_PPI_SIZE);
 
        /* Set the completion routine */
        packet->send_completion = netvsc_xmit_completion;
@@ -554,7 +572,7 @@ do_send:
        rndis_msg->msg_len += rndis_msg_size;
        packet->total_data_buflen = rndis_msg->msg_len;
        packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
-                                       skb, &packet->page_buf[0]);
+                                       skb, &page_buf[0]);
 
        ret = netvsc_send(net_device_ctx->device_ctx, packet);
 
@@ -563,7 +581,8 @@ drop:
                net->stats.tx_bytes += skb_length;
                net->stats.tx_packets++;
        } else {
-               kfree(packet);
+               if (!packet->part_of_skb)
+                       kfree(packet);
                if (ret != -EAGAIN) {
                        dev_kfree_skb_any(skb);
                        net->stats.tx_dropped++;
@@ -687,6 +706,19 @@ static void netvsc_get_drvinfo(struct net_device *net,
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
 }
 
+static void netvsc_get_channels(struct net_device *net,
+                               struct ethtool_channels *channel)
+{
+       struct net_device_context *net_device_ctx = netdev_priv(net);
+       struct hv_device *dev = net_device_ctx->device_ctx;
+       struct netvsc_device *nvdev = hv_get_drvdata(dev);
+
+       if (nvdev) {
+               channel->max_combined   = nvdev->max_chn;
+               channel->combined_count = nvdev->num_chn;
+       }
+}
+
 static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 {
        struct net_device_context *ndevctx = netdev_priv(ndev);
@@ -760,6 +792,7 @@ static void netvsc_poll_controller(struct net_device *net)
 static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo    = netvsc_get_drvinfo,
        .get_link       = ethtool_op_get_link,
+       .get_channels   = netvsc_get_channels,
 };
 
 static const struct net_device_ops device_ops = {
@@ -831,12 +864,18 @@ static int netvsc_probe(struct hv_device *dev,
        struct netvsc_device_info device_info;
        struct netvsc_device *nvdev;
        int ret;
+       u32 max_needed_headroom;
 
        net = alloc_etherdev_mq(sizeof(struct net_device_context),
                                num_online_cpus());
        if (!net)
                return -ENOMEM;
 
+       max_needed_headroom = sizeof(struct hv_netvsc_packet) +
+                               sizeof(struct rndis_message) +
+                               NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
+                               NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE;
+
        netif_carrier_off(net);
 
        net_device_ctx = netdev_priv(net);
@@ -855,6 +894,13 @@ static int netvsc_probe(struct hv_device *dev,
        net->ethtool_ops = &ethtool_ops;
        SET_NETDEV_DEV(net, &dev->device);
 
+       /*
+        * Request additional head room in the skb.
+        * We will use this space to build the rndis
+        * header and other state we need to maintain.
+        */
+       net->needed_headroom = max_needed_headroom;
+
        /* Notify the netvsc driver of the new device */
        device_info.ring_size = ring_size;
        ret = rndis_filter_device_add(dev, &device_info);
index 7816d98bdddc2b920598ba3c9f267304dca847bb..a1604376aee1a1d9fb420e77e4d6791fc4719fa1 100644 (file)
@@ -210,6 +210,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 {
        int ret;
        struct hv_netvsc_packet *packet;
+       struct hv_page_buffer page_buf[2];
 
        /* Setup the packet to send it */
        packet = &req->pkt;
@@ -217,6 +218,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
        packet->is_data_pkt = false;
        packet->total_data_buflen = req->request_msg.msg_len;
        packet->page_buf_cnt = 1;
+       packet->page_buf = page_buf;
 
        packet->page_buf[0].pfn = virt_to_phys(&req->request_msg) >>
                                        PAGE_SHIFT;
@@ -237,6 +239,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
        }
 
        packet->send_completion = NULL;
+       packet->xmit_more = false;
 
        ret = netvsc_send(dev->net_dev->dev, packet);
        return ret;
@@ -855,6 +858,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
        u32 status;
        int ret;
        unsigned long t;
+       struct netvsc_device *nvdev = dev->net_dev;
 
        request = get_rndis_request(dev, RNDIS_MSG_INIT,
                        RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -889,6 +893,8 @@ static int rndis_filter_init_device(struct rndis_device *dev)
        status = init_complete->status;
        if (status == RNDIS_STATUS_SUCCESS) {
                dev->state = RNDIS_DEV_INITIALIZED;
+               nvdev->max_pkt = init_complete->max_pkt_per_msg;
+               nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
                ret = 0;
        } else {
                dev->state = RNDIS_DEV_UNINITIALIZED;
@@ -1027,6 +1033,7 @@ int rndis_filter_device_add(struct hv_device *dev,
 
        /* Initialize the rndis device */
        net_device = hv_get_drvdata(dev);
+       net_device->max_chn = 1;
        net_device->num_chn = 1;
 
        net_device->extension = rndis_device;
@@ -1094,6 +1101,7 @@ int rndis_filter_device_add(struct hv_device *dev,
        if (ret || rsscap.num_recv_que < 2)
                goto out;
 
+       net_device->max_chn = rsscap.num_recv_que;
        net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
                               num_online_cpus() : rsscap.num_recv_que;
        if (net_device->num_chn == 1)
@@ -1135,13 +1143,13 @@ int rndis_filter_device_add(struct hv_device *dev,
        net_device->num_chn = 1 +
                init_packet->msg.v5_msg.subchn_comp.num_subchannels;
 
-       vmbus_are_subchannels_present(dev->channel);
-
        ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
 
 out:
-       if (ret)
+       if (ret) {
+               net_device->max_chn = 1;
                net_device->num_chn = 1;
+       }
        return 0; /* return 0 because primary channel can be used alone */
 
 err_dev_remv:
index 7b051eacb7f184a3770294b5b0d7d60aae5fbe2d..5ad46f7f514f1332df5f42f6f7546aeef313da30 100644 (file)
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/jiffies.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
-#include <linux/spinlock.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/at86rf230.h>
 #include <linux/regmap.h>
@@ -46,19 +47,27 @@ struct at86rf2xx_chip_data {
        u16 t_off_to_tx_on;
        u16 t_frame;
        u16 t_p_ack;
-       /* completion timeout for tx in msecs */
-       u16 t_tx_timeout;
        int rssi_base_val;
 
        int (*set_channel)(struct at86rf230_local *, u8, u8);
        int (*get_desense_steps)(struct at86rf230_local *, s32);
 };
 
-#define AT86RF2XX_MAX_BUF (127 + 3)
+#define AT86RF2XX_MAX_BUF              (127 + 3)
+/* tx retries to access the TX_ON state
+ * if the retry count exceeds this, a force change will be started.
+ *
+ * We assume the max_frame_retries (7) value of 802.15.4 here.
+ */
+#define AT86RF2XX_MAX_TX_RETRIES       7
+/* We use the recommended 5 minutes timeout to recalibrate */
+#define AT86RF2XX_CAL_LOOP_TIMEOUT     (5 * 60 * HZ)
 
 struct at86rf230_state_change {
        struct at86rf230_local *lp;
+       int irq;
 
+       struct hrtimer timer;
        struct spi_message msg;
        struct spi_transfer trx;
        u8 buf[AT86RF2XX_MAX_BUF];
@@ -83,10 +92,10 @@ struct at86rf230_local {
        struct at86rf230_state_change irq;
 
        bool tx_aret;
+       unsigned long cal_timeout;
        s8 max_frame_retries;
        bool is_tx;
-       /* spinlock for is_tx protection */
-       spinlock_t lock;
+       u8 tx_retry;
        struct sk_buff *tx_skb;
        struct at86rf230_state_change tx;
 };
@@ -313,7 +322,7 @@ at86rf230_read_subreg(struct at86rf230_local *lp,
        int rc;
 
        rc = __at86rf230_read(lp, addr, data);
-       if (rc > 0)
+       if (!rc)
                *data = (*data & mask) >> shift;
 
        return rc;
@@ -409,6 +418,8 @@ at86rf230_reg_volatile(struct device *dev, unsigned int reg)
        case RG_PHY_ED_LEVEL:
        case RG_IRQ_STATUS:
        case RG_VREG_CTRL:
+       case RG_PLL_CF:
+       case RG_PLL_DCU:
                return true;
        default:
                return false;
@@ -446,6 +457,7 @@ at86rf230_async_error_recover(void *context)
        struct at86rf230_state_change *ctx = context;
        struct at86rf230_local *lp = ctx->lp;
 
+       lp->is_tx = 0;
        at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL, false);
        ieee802154_wake_queue(lp->hw);
 }
@@ -472,18 +484,25 @@ at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
        u8 *tx_buf = ctx->buf;
 
        tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
-       ctx->trx.len = 2;
        ctx->msg.complete = complete;
        ctx->irq_enable = irq_enable;
        rc = spi_async(lp->spi, &ctx->msg);
        if (rc) {
                if (irq_enable)
-                       enable_irq(lp->spi->irq);
+                       enable_irq(ctx->irq);
 
                at86rf230_async_error(lp, ctx, rc);
        }
 }
 
+static inline u8 at86rf230_state_to_force(u8 state)
+{
+       if (state == STATE_TX_ON)
+               return STATE_FORCE_TX_ON;
+       else
+               return STATE_FORCE_TRX_OFF;
+}
+
 static void
 at86rf230_async_state_assert(void *context)
 {
@@ -514,10 +533,21 @@ at86rf230_async_state_assert(void *context)
                         * in STATE_BUSY_RX_AACK, we run a force state change
                         * to STATE_TX_ON. This is a timeout handling, if the
                         * transceiver stucks in STATE_BUSY_RX_AACK.
+                        *
+                        * Additional we do several retries to try to get into
+                        * TX_ON state without forcing. If the retries are
+                        * higher or equal than AT86RF2XX_MAX_TX_RETRIES we
+                        * will do a force change.
                         */
-                       if (ctx->to_state == STATE_TX_ON) {
-                               at86rf230_async_state_change(lp, ctx,
-                                                            STATE_FORCE_TX_ON,
+                       if (ctx->to_state == STATE_TX_ON ||
+                           ctx->to_state == STATE_TRX_OFF) {
+                               u8 state = ctx->to_state;
+
+                               if (lp->tx_retry >= AT86RF2XX_MAX_TX_RETRIES)
+                                       state = at86rf230_state_to_force(state);
+                               lp->tx_retry++;
+
+                               at86rf230_async_state_change(lp, ctx, state,
                                                             ctx->complete,
                                                             ctx->irq_enable);
                                return;
@@ -533,6 +563,19 @@ done:
                ctx->complete(context);
 }
 
+static enum hrtimer_restart at86rf230_async_state_timer(struct hrtimer *timer)
+{
+       struct at86rf230_state_change *ctx =
+               container_of(timer, struct at86rf230_state_change, timer);
+       struct at86rf230_local *lp = ctx->lp;
+
+       at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+                                at86rf230_async_state_assert,
+                                ctx->irq_enable);
+
+       return HRTIMER_NORESTART;
+}
+
 /* Do state change timing delay. */
 static void
 at86rf230_async_state_delay(void *context)
@@ -541,6 +584,7 @@ at86rf230_async_state_delay(void *context)
        struct at86rf230_local *lp = ctx->lp;
        struct at86rf2xx_chip_data *c = lp->data;
        bool force = false;
+       ktime_t tim;
 
        /* The force state changes will show as normal states in the
         * state status subregister. We change the to_state to the
@@ -564,11 +608,15 @@ at86rf230_async_state_delay(void *context)
        case STATE_TRX_OFF:
                switch (ctx->to_state) {
                case STATE_RX_AACK_ON:
-                       usleep_range(c->t_off_to_aack, c->t_off_to_aack + 10);
+                       tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC);
                        goto change;
                case STATE_TX_ON:
-                       usleep_range(c->t_off_to_tx_on,
-                                    c->t_off_to_tx_on + 10);
+                       tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC);
+                       /* state change from TRX_OFF to TX_ON to do a
+                        * calibration, we need to reset the timeout for the
+                        * next one.
+                        */
+                       lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
                        goto change;
                default:
                        break;
@@ -576,14 +624,15 @@ at86rf230_async_state_delay(void *context)
                break;
        case STATE_BUSY_RX_AACK:
                switch (ctx->to_state) {
+               case STATE_TRX_OFF:
                case STATE_TX_ON:
                        /* Wait for worst case receiving time if we
                         * didn't make a force change from BUSY_RX_AACK
-                        * to TX_ON.
+                        * to TX_ON or TRX_OFF.
                         */
                        if (!force) {
-                               usleep_range(c->t_frame + c->t_p_ack,
-                                            c->t_frame + c->t_p_ack + 1000);
+                               tim = ktime_set(0, (c->t_frame + c->t_p_ack) *
+                                                  NSEC_PER_USEC);
                                goto change;
                        }
                        break;
@@ -595,7 +644,7 @@ at86rf230_async_state_delay(void *context)
        case STATE_P_ON:
                switch (ctx->to_state) {
                case STATE_TRX_OFF:
-                       usleep_range(c->t_reset_to_off, c->t_reset_to_off + 10);
+                       tim = ktime_set(0, c->t_reset_to_off * NSEC_PER_USEC);
                        goto change;
                default:
                        break;
@@ -606,12 +655,10 @@ at86rf230_async_state_delay(void *context)
        }
 
        /* Default delay is 1us in the most cases */
-       udelay(1);
+       tim = ktime_set(0, NSEC_PER_USEC);
 
 change:
-       at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
-                                at86rf230_async_state_assert,
-                                ctx->irq_enable);
+       hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
 }
 
 static void
@@ -647,12 +694,11 @@ at86rf230_async_state_change_start(void *context)
         */
        buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
        buf[1] = ctx->to_state;
-       ctx->trx.len = 2;
        ctx->msg.complete = at86rf230_async_state_delay;
        rc = spi_async(lp->spi, &ctx->msg);
        if (rc) {
                if (ctx->irq_enable)
-                       enable_irq(lp->spi->irq);
+                       enable_irq(ctx->irq);
 
                at86rf230_async_error(lp, ctx, rc);
        }
@@ -689,7 +735,7 @@ at86rf230_sync_state_change_complete(void *context)
 static int
 at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state)
 {
-       int rc;
+       unsigned long rc;
 
        at86rf230_async_state_change(lp, &lp->state, state,
                                     at86rf230_sync_state_change_complete,
@@ -710,11 +756,10 @@ at86rf230_tx_complete(void *context)
 {
        struct at86rf230_state_change *ctx = context;
        struct at86rf230_local *lp = ctx->lp;
-       struct sk_buff *skb = lp->tx_skb;
 
-       enable_irq(lp->spi->irq);
+       enable_irq(ctx->irq);
 
-       ieee802154_xmit_complete(lp->hw, skb, !lp->tx_aret);
+       ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret);
 }
 
 static void
@@ -723,7 +768,7 @@ at86rf230_tx_on(void *context)
        struct at86rf230_state_change *ctx = context;
        struct at86rf230_local *lp = ctx->lp;
 
-       at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON,
+       at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
                                     at86rf230_tx_complete, true);
 }
 
@@ -767,14 +812,25 @@ at86rf230_tx_trac_status(void *context)
 }
 
 static void
-at86rf230_rx(struct at86rf230_local *lp,
-            const u8 *data, const u8 len, const u8 lqi)
+at86rf230_rx_read_frame_complete(void *context)
 {
-       struct sk_buff *skb;
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
        u8 rx_local_buf[AT86RF2XX_MAX_BUF];
+       const u8 *buf = ctx->buf;
+       struct sk_buff *skb;
+       u8 len, lqi;
+
+       len = buf[1];
+       if (!ieee802154_is_valid_psdu_len(len)) {
+               dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
+               len = IEEE802154_MTU;
+       }
+       lqi = buf[2 + len];
 
-       memcpy(rx_local_buf, data, len);
-       enable_irq(lp->spi->irq);
+       memcpy(rx_local_buf, buf + 2, len);
+       ctx->trx.len = 2;
+       enable_irq(ctx->irq);
 
        skb = dev_alloc_skb(IEEE802154_MTU);
        if (!skb) {
@@ -787,60 +843,41 @@ at86rf230_rx(struct at86rf230_local *lp,
 }
 
 static void
-at86rf230_rx_read_frame_complete(void *context)
+at86rf230_rx_read_frame(void *context)
 {
        struct at86rf230_state_change *ctx = context;
        struct at86rf230_local *lp = ctx->lp;
-       const u8 *buf = lp->irq.buf;
-       u8 len = buf[1];
-
-       if (!ieee802154_is_valid_psdu_len(len)) {
-               dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
-               len = IEEE802154_MTU;
-       }
-
-       at86rf230_rx(lp, buf + 2, len, buf[2 + len]);
-}
-
-static void
-at86rf230_rx_read_frame(struct at86rf230_local *lp)
-{
+       u8 *buf = ctx->buf;
        int rc;
 
-       u8 *buf = lp->irq.buf;
-
        buf[0] = CMD_FB;
-       lp->irq.trx.len = AT86RF2XX_MAX_BUF;
-       lp->irq.msg.complete = at86rf230_rx_read_frame_complete;
-       rc = spi_async(lp->spi, &lp->irq.msg);
+       ctx->trx.len = AT86RF2XX_MAX_BUF;
+       ctx->msg.complete = at86rf230_rx_read_frame_complete;
+       rc = spi_async(lp->spi, &ctx->msg);
        if (rc) {
-               enable_irq(lp->spi->irq);
-               at86rf230_async_error(lp, &lp->irq, rc);
+               ctx->trx.len = 2;
+               enable_irq(ctx->irq);
+               at86rf230_async_error(lp, ctx, rc);
        }
 }
 
 static void
 at86rf230_rx_trac_check(void *context)
 {
-       struct at86rf230_state_change *ctx = context;
-       struct at86rf230_local *lp = ctx->lp;
-
        /* Possible check on trac status here. This could be useful to gather
         * stats on why receive failed. Not used at the moment, but it's
         * maybe timing relevant. The datasheet doesn't say anything about this.
         * The programming guide says to do so.
         */
 
-       at86rf230_rx_read_frame(lp);
+       at86rf230_rx_read_frame(context);
 }
 
 static void
 at86rf230_irq_trx_end(struct at86rf230_local *lp)
 {
-       spin_lock(&lp->lock);
        if (lp->is_tx) {
                lp->is_tx = 0;
-               spin_unlock(&lp->lock);
 
                if (lp->tx_aret)
                        at86rf230_async_state_change(lp, &lp->irq,
@@ -853,7 +890,6 @@ at86rf230_irq_trx_end(struct at86rf230_local *lp)
                                                     at86rf230_tx_complete,
                                                     true);
        } else {
-               spin_unlock(&lp->lock);
                at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
                                         at86rf230_rx_trac_check, true);
        }
@@ -864,13 +900,13 @@ at86rf230_irq_status(void *context)
 {
        struct at86rf230_state_change *ctx = context;
        struct at86rf230_local *lp = ctx->lp;
-       const u8 *buf = lp->irq.buf;
+       const u8 *buf = ctx->buf;
        const u8 irq = buf[1];
 
        if (irq & IRQ_TRX_END) {
                at86rf230_irq_trx_end(lp);
        } else {
-               enable_irq(lp->spi->irq);
+               enable_irq(ctx->irq);
                dev_err(&lp->spi->dev, "not supported irq %02x received\n",
                        irq);
        }
@@ -886,7 +922,6 @@ static irqreturn_t at86rf230_isr(int irq, void *data)
        disable_irq_nosync(irq);
 
        buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
-       ctx->trx.len = 2;
        ctx->msg.complete = at86rf230_irq_status;
        rc = spi_async(lp->spi, &ctx->msg);
        if (rc) {
@@ -921,21 +956,21 @@ at86rf230_write_frame(void *context)
        struct at86rf230_state_change *ctx = context;
        struct at86rf230_local *lp = ctx->lp;
        struct sk_buff *skb = lp->tx_skb;
-       u8 *buf = lp->tx.buf;
+       u8 *buf = ctx->buf;
        int rc;
 
-       spin_lock(&lp->lock);
        lp->is_tx = 1;
-       spin_unlock(&lp->lock);
 
        buf[0] = CMD_FB | CMD_WRITE;
        buf[1] = skb->len + 2;
        memcpy(buf + 2, skb->data, skb->len);
-       lp->tx.trx.len = skb->len + 2;
-       lp->tx.msg.complete = at86rf230_write_frame_complete;
-       rc = spi_async(lp->spi, &lp->tx.msg);
-       if (rc)
+       ctx->trx.len = skb->len + 2;
+       ctx->msg.complete = at86rf230_write_frame_complete;
+       rc = spi_async(lp->spi, &ctx->msg);
+       if (rc) {
+               ctx->trx.len = 2;
                at86rf230_async_error(lp, ctx, rc);
+       }
 }
 
 static void
@@ -948,24 +983,45 @@ at86rf230_xmit_tx_on(void *context)
                                     at86rf230_write_frame, false);
 }
 
+static void
+at86rf230_xmit_start(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+
+       /* In ARET mode we need to go into STATE_TX_ARET_ON after we
+        * are in STATE_TX_ON. The path differs here, so we change
+        * the complete handler.
+        */
+       if (lp->tx_aret)
+               at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+                                            at86rf230_xmit_tx_on, false);
+       else
+               at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+                                            at86rf230_write_frame, false);
+}
+
 static int
 at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 {
        struct at86rf230_local *lp = hw->priv;
        struct at86rf230_state_change *ctx = &lp->tx;
 
-       void (*tx_complete)(void *context) = at86rf230_write_frame;
-
        lp->tx_skb = skb;
+       lp->tx_retry = 0;
 
-       /* In ARET mode we need to go into STATE_TX_ARET_ON after we
-        * are in STATE_TX_ON. The pfad differs here, so we change
-        * the complete handler.
+       /* After 5 minutes in PLL at the same frequency we rerun the
+        * calibration loops, as recommended by the at86rf2xx datasheets.
+        *
+        * The calibration is initiated by a state change from TRX_OFF
+        * to TX_ON; lp->cal_timeout is then reinitialized by the state_delay
+        * function so the next run starts in 5 minutes.
         */
-       if (lp->tx_aret)
-               tx_complete = at86rf230_xmit_tx_on;
-
-       at86rf230_async_state_change(lp, ctx, STATE_TX_ON, tx_complete, false);
+       if (time_is_before_jiffies(lp->cal_timeout))
+               at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
+                                            at86rf230_xmit_start, false);
+       else
+               at86rf230_xmit_start(ctx);
 
        return 0;
 }
@@ -981,6 +1037,9 @@ at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
 static int
 at86rf230_start(struct ieee802154_hw *hw)
 {
+       struct at86rf230_local *lp = hw->priv;
+
+       lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
        return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON);
 }
 
@@ -1061,6 +1120,8 @@ at86rf230_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
        /* Wait for PLL */
        usleep_range(lp->data->t_channel_switch,
                     lp->data->t_channel_switch + 10);
+
+       lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
        return rc;
 }
 
@@ -1281,7 +1342,6 @@ static struct at86rf2xx_chip_data at86rf233_data = {
        .t_off_to_tx_on = 80,
        .t_frame = 4096,
        .t_p_ack = 545,
-       .t_tx_timeout = 2000,
        .rssi_base_val = -91,
        .set_channel = at86rf23x_set_channel,
        .get_desense_steps = at86rf23x_get_desens_steps
@@ -1295,7 +1355,6 @@ static struct at86rf2xx_chip_data at86rf231_data = {
        .t_off_to_tx_on = 110,
        .t_frame = 4096,
        .t_p_ack = 545,
-       .t_tx_timeout = 2000,
        .rssi_base_val = -91,
        .set_channel = at86rf23x_set_channel,
        .get_desense_steps = at86rf23x_get_desens_steps
@@ -1309,13 +1368,12 @@ static struct at86rf2xx_chip_data at86rf212_data = {
        .t_off_to_tx_on = 200,
        .t_frame = 4096,
        .t_p_ack = 545,
-       .t_tx_timeout = 2000,
        .rssi_base_val = -100,
        .set_channel = at86rf212_set_channel,
        .get_desense_steps = at86rf212_get_desens_steps
 };
 
-static int at86rf230_hw_init(struct at86rf230_local *lp)
+static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
 {
        int rc, irq_type, irq_pol = IRQ_ACTIVE_HIGH;
        unsigned int dvdd;
@@ -1326,7 +1384,12 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
                return rc;
 
        irq_type = irq_get_trigger_type(lp->spi->irq);
-       if (irq_type == IRQ_TYPE_EDGE_FALLING)
+       if (irq_type == IRQ_TYPE_EDGE_RISING ||
+           irq_type == IRQ_TYPE_EDGE_FALLING)
+               dev_warn(&lp->spi->dev,
+                        "Using edge triggered irq's are not recommended!\n");
+       if (irq_type == IRQ_TYPE_EDGE_FALLING ||
+           irq_type == IRQ_TYPE_LEVEL_LOW)
                irq_pol = IRQ_ACTIVE_LOW;
 
        rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol);
@@ -1341,6 +1404,11 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        if (rc)
                return rc;
 
+       /* reset values differ between at86rf231 and at86rf233 */
+       rc = at86rf230_write_subreg(lp, SR_IRQ_MASK_MODE, 0);
+       if (rc)
+               return rc;
+
        get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed));
        rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]);
        if (rc)
@@ -1362,6 +1430,45 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        usleep_range(lp->data->t_sleep_cycle,
                     lp->data->t_sleep_cycle + 100);
 
+       /* xtal_trim value is calculated by:
+        * CL = 0.5 * (CX + CTRIM + CPAR)
+        *
+        * whereas:
+        * CL = capacitor of used crystal
+        * CX = connected capacitors at xtal pins
+        * CPAR = in all at86rf2xx datasheets this is a constant value 3 pF,
+        *        but this is different on each board setup. You need to
+        *        fine-tune this value via CTRIM.
+        * CTRIM = variable capacitor setting. Resolution is 0.3 pF; range is
+        *         0 pF up to 4.5 pF.
+        *
+        * Examples:
+        * atben transceiver:
+        *
+        * CL = 8 pF
+        * CX = 12 pF
+        * CPAR = 3 pF (We assume the magic constant from datasheet)
+        * CTRIM = 0.9 pF
+        *
+        * (12+0.9+3)/2 = 7.95 which is nearly at 8 pF
+        *
+        * xtal_trim = 0x3
+        *
+        * openlabs transceiver:
+        *
+        * CL = 16 pF
+        * CX = 22 pF
+        * CPAR = 3 pF (We assume the magic constant from datasheet)
+        * CTRIM = 4.5 pF
+        *
+        * (22+4.5+3)/2 = 14.75 which is the nearest value to 16 pF
+        *
+        * xtal_trim = 0xf
+        */
+       rc = at86rf230_write_subreg(lp, SR_XTAL_TRIM, xtal_trim);
+       if (rc)
+               return rc;
+
        rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd);
        if (rc)
                return rc;
@@ -1377,24 +1484,30 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        return at86rf230_write_subreg(lp, SR_SLOTTED_OPERATION, 0);
 }
 
-static struct at86rf230_platform_data *
-at86rf230_get_pdata(struct spi_device *spi)
+static int
+at86rf230_get_pdata(struct spi_device *spi, int *rstn, int *slp_tr,
+                   u8 *xtal_trim)
 {
-       struct at86rf230_platform_data *pdata;
+       struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+       int ret;
 
-       if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
-               return spi->dev.platform_data;
+       if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) {
+               if (!pdata)
+                       return -ENOENT;
 
-       pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
-       if (!pdata)
-               goto done;
+               *rstn = pdata->rstn;
+               *slp_tr = pdata->slp_tr;
+               *xtal_trim = pdata->xtal_trim;
+               return 0;
+       }
 
-       pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
-       pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
+       *rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
+       *slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
+       ret = of_property_read_u8(spi->dev.of_node, "xtal-trim", xtal_trim);
+       if (ret < 0 && ret != -EINVAL)
+               return ret;
 
-       spi->dev.platform_data = pdata;
-done:
-       return pdata;
+       return 0;
 }
 
 static int
@@ -1478,66 +1591,78 @@ static void
 at86rf230_setup_spi_messages(struct at86rf230_local *lp)
 {
        lp->state.lp = lp;
+       lp->state.irq = lp->spi->irq;
        spi_message_init(&lp->state.msg);
        lp->state.msg.context = &lp->state;
+       lp->state.trx.len = 2;
        lp->state.trx.tx_buf = lp->state.buf;
        lp->state.trx.rx_buf = lp->state.buf;
        spi_message_add_tail(&lp->state.trx, &lp->state.msg);
+       hrtimer_init(&lp->state.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       lp->state.timer.function = at86rf230_async_state_timer;
 
        lp->irq.lp = lp;
+       lp->irq.irq = lp->spi->irq;
        spi_message_init(&lp->irq.msg);
        lp->irq.msg.context = &lp->irq;
+       lp->irq.trx.len = 2;
        lp->irq.trx.tx_buf = lp->irq.buf;
        lp->irq.trx.rx_buf = lp->irq.buf;
        spi_message_add_tail(&lp->irq.trx, &lp->irq.msg);
+       hrtimer_init(&lp->irq.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       lp->irq.timer.function = at86rf230_async_state_timer;
 
        lp->tx.lp = lp;
+       lp->tx.irq = lp->spi->irq;
        spi_message_init(&lp->tx.msg);
        lp->tx.msg.context = &lp->tx;
+       lp->tx.trx.len = 2;
        lp->tx.trx.tx_buf = lp->tx.buf;
        lp->tx.trx.rx_buf = lp->tx.buf;
        spi_message_add_tail(&lp->tx.trx, &lp->tx.msg);
+       hrtimer_init(&lp->tx.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       lp->tx.timer.function = at86rf230_async_state_timer;
 }
 
 static int at86rf230_probe(struct spi_device *spi)
 {
-       struct at86rf230_platform_data *pdata;
        struct ieee802154_hw *hw;
        struct at86rf230_local *lp;
        unsigned int status;
-       int rc, irq_type;
+       int rc, irq_type, rstn, slp_tr;
+       u8 xtal_trim = 0;
 
        if (!spi->irq) {
                dev_err(&spi->dev, "no IRQ specified\n");
                return -EINVAL;
        }
 
-       pdata = at86rf230_get_pdata(spi);
-       if (!pdata) {
-               dev_err(&spi->dev, "no platform_data\n");
-               return -EINVAL;
+       rc = at86rf230_get_pdata(spi, &rstn, &slp_tr, &xtal_trim);
+       if (rc < 0) {
+               dev_err(&spi->dev, "failed to parse platform_data: %d\n", rc);
+               return rc;
        }
 
-       if (gpio_is_valid(pdata->rstn)) {
-               rc = devm_gpio_request_one(&spi->dev, pdata->rstn,
+       if (gpio_is_valid(rstn)) {
+               rc = devm_gpio_request_one(&spi->dev, rstn,
                                           GPIOF_OUT_INIT_HIGH, "rstn");
                if (rc)
                        return rc;
        }
 
-       if (gpio_is_valid(pdata->slp_tr)) {
-               rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr,
+       if (gpio_is_valid(slp_tr)) {
+               rc = devm_gpio_request_one(&spi->dev, slp_tr,
                                           GPIOF_OUT_INIT_LOW, "slp_tr");
                if (rc)
                        return rc;
        }
 
        /* Reset */
-       if (gpio_is_valid(pdata->rstn)) {
+       if (gpio_is_valid(rstn)) {
                udelay(1);
-               gpio_set_value(pdata->rstn, 0);
+               gpio_set_value(rstn, 0);
                udelay(1);
-               gpio_set_value(pdata->rstn, 1);
+               gpio_set_value(rstn, 1);
                usleep_range(120, 240);
        }
 
@@ -1566,12 +1691,11 @@ static int at86rf230_probe(struct spi_device *spi)
        if (rc < 0)
                goto free_dev;
 
-       spin_lock_init(&lp->lock);
        init_completion(&lp->state_complete);
 
        spi_set_drvdata(spi, lp);
 
-       rc = at86rf230_hw_init(lp);
+       rc = at86rf230_hw_init(lp, xtal_trim);
        if (rc)
                goto free_dev;
 
index 181b349b060ee552dcc5be089c8cf616d7d965b2..f833b8bb66634ed0e6057235785c67e7c072f4b7 100644 (file)
@@ -714,11 +714,45 @@ static irqreturn_t cc2520_sfd_isr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+static int cc2520_get_platform_data(struct spi_device *spi,
+                                   struct cc2520_platform_data *pdata)
+{
+       struct device_node *np = spi->dev.of_node;
+       struct cc2520_private *priv = spi_get_drvdata(spi);
+
+       if (!np) {
+               struct cc2520_platform_data *spi_pdata = spi->dev.platform_data;
+               if (!spi_pdata)
+                       return -ENOENT;
+               *pdata = *spi_pdata;
+               return 0;
+       }
+
+       pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0);
+       priv->fifo_pin = pdata->fifo;
+
+       pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 0);
+
+       pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 0);
+       pdata->cca = of_get_named_gpio(np, "cca-gpio", 0);
+       pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0);
+       pdata->reset = of_get_named_gpio(np, "reset-gpio", 0);
+
+       pdata->amplified = of_property_read_bool(np, "amplified");
+
+       return 0;
+}
+
 static int cc2520_hw_init(struct cc2520_private *priv)
 {
        u8 status = 0, state = 0xff;
        int ret;
        int timeout = 100;
+       struct cc2520_platform_data pdata;
+
+       ret = cc2520_get_platform_data(priv->spi, &pdata);
+       if (ret)
+               goto err_ret;
 
        ret = cc2520_read_register(priv, CC2520_FSMSTAT1, &state);
        if (ret)
@@ -741,11 +775,47 @@ static int cc2520_hw_init(struct cc2520_private *priv)
 
        dev_vdbg(&priv->spi->dev, "oscillator brought up\n");
 
-       /* Registers default value: section 28.1 in Datasheet */
-       ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF7);
-       if (ret)
-               goto err_ret;
+       /* If the CC2520 is connected to a CC2591 amplifier, we must both
+        * configure GPIOs on the CC2520 to correctly configure the CC2591
+        * and change a couple settings of the CC2520 to work with the
+        * amplifier. See section 8 page 17 of TI application note AN065.
+        * http://www.ti.com/lit/an/swra229a/swra229a.pdf
+        */
+       if (pdata.amplified) {
+               ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF9);
+               if (ret)
+                       goto err_ret;
+
+               ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x16);
+               if (ret)
+                       goto err_ret;
+
+               ret = cc2520_write_register(priv, CC2520_GPIOCTRL0, 0x46);
+               if (ret)
+                       goto err_ret;
+
+               ret = cc2520_write_register(priv, CC2520_GPIOCTRL5, 0x47);
+               if (ret)
+                       goto err_ret;
+
+               ret = cc2520_write_register(priv, CC2520_GPIOPOLARITY, 0x1e);
+               if (ret)
+                       goto err_ret;
+
+               ret = cc2520_write_register(priv, CC2520_TXCTRL, 0xc1);
+               if (ret)
+                       goto err_ret;
+       } else {
+               ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF7);
+               if (ret)
+                       goto err_ret;
 
+               ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11);
+               if (ret)
+                       goto err_ret;
+       }
+
+       /* Registers default value: section 28.1 in Datasheet */
        ret = cc2520_write_register(priv, CC2520_CCACTRL0, 0x1A);
        if (ret)
                goto err_ret;
@@ -770,10 +840,6 @@ static int cc2520_hw_init(struct cc2520_private *priv)
        if (ret)
                goto err_ret;
 
-       ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11);
-       if (ret)
-               goto err_ret;
-
        ret = cc2520_write_register(priv, CC2520_ADCTEST0, 0x10);
        if (ret)
                goto err_ret;
@@ -808,40 +874,10 @@ err_ret:
        return ret;
 }
 
-static struct cc2520_platform_data *
-cc2520_get_platform_data(struct spi_device *spi)
-{
-       struct cc2520_platform_data *pdata;
-       struct device_node *np = spi->dev.of_node;
-       struct cc2520_private *priv = spi_get_drvdata(spi);
-
-       if (!np)
-               return spi->dev.platform_data;
-
-       pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
-       if (!pdata)
-               goto done;
-
-       pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0);
-       priv->fifo_pin = pdata->fifo;
-
-       pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 0);
-
-       pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 0);
-       pdata->cca = of_get_named_gpio(np, "cca-gpio", 0);
-       pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0);
-       pdata->reset = of_get_named_gpio(np, "reset-gpio", 0);
-
-       spi->dev.platform_data = pdata;
-
-done:
-       return pdata;
-}
-
 static int cc2520_probe(struct spi_device *spi)
 {
        struct cc2520_private *priv;
-       struct cc2520_platform_data *pdata;
+       struct cc2520_platform_data pdata;
        int ret;
 
        priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
@@ -850,8 +886,8 @@ static int cc2520_probe(struct spi_device *spi)
 
        spi_set_drvdata(spi, priv);
 
-       pdata = cc2520_get_platform_data(spi);
-       if (!pdata) {
+       ret = cc2520_get_platform_data(spi, &pdata);
+       if (ret < 0) {
                dev_err(&spi->dev, "no platform data\n");
                return -EINVAL;
        }
@@ -869,76 +905,76 @@ static int cc2520_probe(struct spi_device *spi)
        init_completion(&priv->tx_complete);
 
        /* Request all the gpio's */
-       if (!gpio_is_valid(pdata->fifo)) {
+       if (!gpio_is_valid(pdata.fifo)) {
                dev_err(&spi->dev, "fifo gpio is not valid\n");
                ret = -EINVAL;
                goto err_hw_init;
        }
 
-       ret = devm_gpio_request_one(&spi->dev, pdata->fifo,
+       ret = devm_gpio_request_one(&spi->dev, pdata.fifo,
                                    GPIOF_IN, "fifo");
        if (ret)
                goto err_hw_init;
 
-       if (!gpio_is_valid(pdata->cca)) {
+       if (!gpio_is_valid(pdata.cca)) {
                dev_err(&spi->dev, "cca gpio is not valid\n");
                ret = -EINVAL;
                goto err_hw_init;
        }
 
-       ret = devm_gpio_request_one(&spi->dev, pdata->cca,
+       ret = devm_gpio_request_one(&spi->dev, pdata.cca,
                                    GPIOF_IN, "cca");
        if (ret)
                goto err_hw_init;
 
-       if (!gpio_is_valid(pdata->fifop)) {
+       if (!gpio_is_valid(pdata.fifop)) {
                dev_err(&spi->dev, "fifop gpio is not valid\n");
                ret = -EINVAL;
                goto err_hw_init;
        }
 
-       ret = devm_gpio_request_one(&spi->dev, pdata->fifop,
+       ret = devm_gpio_request_one(&spi->dev, pdata.fifop,
                                    GPIOF_IN, "fifop");
        if (ret)
                goto err_hw_init;
 
-       if (!gpio_is_valid(pdata->sfd)) {
+       if (!gpio_is_valid(pdata.sfd)) {
                dev_err(&spi->dev, "sfd gpio is not valid\n");
                ret = -EINVAL;
                goto err_hw_init;
        }
 
-       ret = devm_gpio_request_one(&spi->dev, pdata->sfd,
+       ret = devm_gpio_request_one(&spi->dev, pdata.sfd,
                                    GPIOF_IN, "sfd");
        if (ret)
                goto err_hw_init;
 
-       if (!gpio_is_valid(pdata->reset)) {
+       if (!gpio_is_valid(pdata.reset)) {
                dev_err(&spi->dev, "reset gpio is not valid\n");
                ret = -EINVAL;
                goto err_hw_init;
        }
 
-       ret = devm_gpio_request_one(&spi->dev, pdata->reset,
+       ret = devm_gpio_request_one(&spi->dev, pdata.reset,
                                    GPIOF_OUT_INIT_LOW, "reset");
        if (ret)
                goto err_hw_init;
 
-       if (!gpio_is_valid(pdata->vreg)) {
+       if (!gpio_is_valid(pdata.vreg)) {
                dev_err(&spi->dev, "vreg gpio is not valid\n");
                ret = -EINVAL;
                goto err_hw_init;
        }
 
-       ret = devm_gpio_request_one(&spi->dev, pdata->vreg,
+       ret = devm_gpio_request_one(&spi->dev, pdata.vreg,
                                    GPIOF_OUT_INIT_LOW, "vreg");
        if (ret)
                goto err_hw_init;
 
-       gpio_set_value(pdata->vreg, HIGH);
+       gpio_set_value(pdata.vreg, HIGH);
        usleep_range(100, 150);
 
-       gpio_set_value(pdata->reset, HIGH);
+       gpio_set_value(pdata.reset, HIGH);
        usleep_range(200, 250);
 
        ret = cc2520_hw_init(priv);
@@ -947,7 +983,7 @@ static int cc2520_probe(struct spi_device *spi)
 
        /* Set up fifop interrupt */
        ret = devm_request_irq(&spi->dev,
-                              gpio_to_irq(pdata->fifop),
+                              gpio_to_irq(pdata.fifop),
                               cc2520_fifop_isr,
                               IRQF_TRIGGER_RISING,
                               dev_name(&spi->dev),
@@ -959,7 +995,7 @@ static int cc2520_probe(struct spi_device *spi)
 
        /* Set up sfd interrupt */
        ret = devm_request_irq(&spi->dev,
-                              gpio_to_irq(pdata->sfd),
+                              gpio_to_irq(pdata.sfd),
                               cc2520_sfd_isr,
                               IRQF_TRIGGER_FALLING,
                               dev_name(&spi->dev),
index b7877a194cfe430469af8031c58efdf307eafc4f..c30b5c300c05f6fdd91e48802c84b77103e3a322 100644 (file)
@@ -342,7 +342,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
        struct rtable *rt;
        int err, ret = NET_XMIT_DROP;
        struct flowi4 fl4 = {
-               .flowi4_oif = dev->iflink,
+               .flowi4_oif = dev_get_iflink(dev),
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC,
                .daddr = ip4h->daddr,
index 4fa14208d79931df1b5d8707744e93043b951424..77b92a0fe557ade8fea66af377217e9c0f8feded 100644 (file)
@@ -114,7 +114,6 @@ static int ipvlan_init(struct net_device *dev)
        dev->features = phy_dev->features & IPVLAN_FEATURES;
        dev->features |= NETIF_F_LLTX;
        dev->gso_max_size = phy_dev->gso_max_size;
-       dev->iflink = phy_dev->ifindex;
        dev->hard_header_len = phy_dev->hard_header_len;
 
        ipvlan_set_lockdep_class(dev);
@@ -305,6 +304,13 @@ static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
        return 0;
 }
 
+static int ipvlan_get_iflink(const struct net_device *dev)
+{
+       struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+       return ipvlan->phy_dev->ifindex;
+}
+
 static const struct net_device_ops ipvlan_netdev_ops = {
        .ndo_init               = ipvlan_init,
        .ndo_uninit             = ipvlan_uninit,
@@ -317,6 +323,7 @@ static const struct net_device_ops ipvlan_netdev_ops = {
        .ndo_get_stats64        = ipvlan_get_stats64,
        .ndo_vlan_rx_add_vid    = ipvlan_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ipvlan_vlan_rx_kill_vid,
+       .ndo_get_iflink         = ipvlan_get_iflink,
 };
 
 static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -336,7 +343,6 @@ static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
 
 static const struct header_ops ipvlan_header_ops = {
        .create         = ipvlan_hard_header,
-       .rebuild        = eth_rebuild_header,
        .parse          = eth_header_parse,
        .cache          = eth_header_cache,
        .cache_update   = eth_header_cache_update,
index 1df38bdae2ee384d1c6285a699b7dbf31dd28f59..b227a13f6473404a5082a0a99d4e7067b3daeaf7 100644 (file)
@@ -550,7 +550,6 @@ static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
 
 static const struct header_ops macvlan_hard_header_ops = {
        .create         = macvlan_hard_header,
-       .rebuild        = eth_rebuild_header,
        .parse          = eth_header_parse,
        .cache          = eth_header_cache,
        .cache_update   = eth_header_cache_update,
@@ -787,7 +786,6 @@ static int macvlan_init(struct net_device *dev)
        dev->hw_features        |= NETIF_F_LRO;
        dev->vlan_features      = lowerdev->vlan_features & MACVLAN_FEATURES;
        dev->gso_max_size       = lowerdev->gso_max_size;
-       dev->iflink             = lowerdev->ifindex;
        dev->hard_header_len    = lowerdev->hard_header_len;
 
        macvlan_set_lockdep_class(dev);
@@ -996,6 +994,13 @@ static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
+static int macvlan_dev_get_iflink(const struct net_device *dev)
+{
+       struct macvlan_dev *vlan = netdev_priv(dev);
+
+       return vlan->lowerdev->ifindex;
+}
+
 static const struct ethtool_ops macvlan_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_settings           = macvlan_ethtool_get_settings,
@@ -1026,6 +1031,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_netpoll_setup      = macvlan_dev_netpoll_setup,
        .ndo_netpoll_cleanup    = macvlan_dev_netpoll_cleanup,
 #endif
+       .ndo_get_iflink         = macvlan_dev_get_iflink,
 };
 
 void macvlan_common_setup(struct net_device *dev)
index 27ecc5c4fa2665cd42ac1ca81717255f85507113..9c91ff8724851d5d284bd8c5b128f94d57e1537e 100644 (file)
@@ -1118,8 +1118,6 @@ static const struct file_operations macvtap_fops = {
        .owner          = THIS_MODULE,
        .open           = macvtap_open,
        .release        = macvtap_release,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = macvtap_read_iter,
        .write_iter     = macvtap_write_iter,
        .poll           = macvtap_poll,
@@ -1130,16 +1128,15 @@ static const struct file_operations macvtap_fops = {
 #endif
 };
 
-static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
-                          struct msghdr *m, size_t total_len)
+static int macvtap_sendmsg(struct socket *sock, struct msghdr *m,
+                          size_t total_len)
 {
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
        return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
 }
 
-static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
-                          struct msghdr *m, size_t total_len,
-                          int flags)
+static int macvtap_recvmsg(struct socket *sock, struct msghdr *m,
+                          size_t total_len, int flags)
 {
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
        int ret;
index ba2f5e710af12cc7587f0503ea8dc177d2aba4ed..15731d1db918c32f6dc341800155340c0e62a7a2 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/netpoll.h>
 #include <linux/inet.h>
 #include <linux/configfs.h>
+#include <linux/etherdevice.h>
 
 MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>");
 MODULE_DESCRIPTION("Console driver for network interfaces");
@@ -185,7 +186,7 @@ static struct netconsole_target *alloc_param_target(char *target_config)
        nt->np.local_port = 6665;
        nt->np.remote_port = 6666;
        mutex_init(&nt->mutex);
-       memset(nt->np.remote_mac, 0xff, ETH_ALEN);
+       eth_broadcast_addr(nt->np.remote_mac);
 
        /* Parse parameters and setup netpoll */
        err = netpoll_parse_options(&nt->np, target_config);
@@ -604,7 +605,7 @@ static struct config_item *make_netconsole_target(struct config_group *group,
        nt->np.local_port = 6665;
        nt->np.remote_port = 6666;
        mutex_init(&nt->mutex);
-       memset(nt->np.remote_mac, 0xff, ETH_ALEN);
+       eth_broadcast_addr(nt->np.remote_mac);
 
        /* Initialize the config_item member */
        config_item_init_type_name(&nt->item, name, &netconsole_target_type);
index 32efbd48f32642ddabb21126384b0c21e160a403..fb276f64cd6400cc7617c2586582c378eb2e9c53 100644 (file)
@@ -78,6 +78,7 @@
 #include <linux/bitops.h>
 #include <linux/property.h>
 #include <linux/acpi.h>
+#include <linux/jiffies.h>
 
 MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
 MODULE_LICENSE("Dual BSD/GPL");
@@ -100,6 +101,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define XGBE_PHY_SPEED_2500            1
 #define XGBE_PHY_SPEED_10000           2
 
+#define XGBE_AN_MS_TIMEOUT             500
+
 #define XGBE_AN_INT_CMPLT              0x01
 #define XGBE_AN_INC_LINK               0x02
 #define XGBE_AN_PG_RCV                 0x04
@@ -434,6 +437,7 @@ struct amd_xgbe_phy_priv {
        unsigned int an_supported;
        unsigned int parallel_detect;
        unsigned int fec_ability;
+       unsigned long an_start;
 
        unsigned int lpm_ctrl;          /* CTRL1 for resume */
 };
@@ -902,8 +906,23 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
 {
        struct amd_xgbe_phy_priv *priv = phydev->priv;
        enum amd_xgbe_phy_rx *state;
+       unsigned long an_timeout;
        int ret;
 
+       if (!priv->an_start) {
+               priv->an_start = jiffies;
+       } else {
+               an_timeout = priv->an_start +
+                            msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+               if (time_after(jiffies, an_timeout)) {
+                       /* Auto-negotiation timed out, reset state */
+                       priv->kr_state = AMD_XGBE_RX_BPA;
+                       priv->kx_state = AMD_XGBE_RX_BPA;
+
+                       priv->an_start = jiffies;
+               }
+       }
+
        state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
                                                : &priv->kx_state;
 
@@ -932,8 +951,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
        if (amd_xgbe_phy_in_kr_mode(phydev)) {
                priv->kr_state = AMD_XGBE_RX_ERROR;
 
-               if (!(phydev->supported & SUPPORTED_1000baseKX_Full) &&
-                   !(phydev->supported & SUPPORTED_2500baseX_Full))
+               if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
+                   !(phydev->advertising & SUPPORTED_2500baseX_Full))
                        return AMD_XGBE_AN_NO_LINK;
 
                if (priv->kx_state != AMD_XGBE_RX_BPA)
@@ -941,7 +960,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
        } else {
                priv->kx_state = AMD_XGBE_RX_ERROR;
 
-               if (!(phydev->supported & SUPPORTED_10000baseKR_Full))
+               if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
                        return AMD_XGBE_AN_NO_LINK;
 
                if (priv->kr_state != AMD_XGBE_RX_BPA)
@@ -1078,6 +1097,7 @@ again:
                priv->an_state = AMD_XGBE_AN_READY;
                priv->kr_state = AMD_XGBE_RX_BPA;
                priv->kx_state = AMD_XGBE_RX_BPA;
+               priv->an_start = 0;
        }
 
        if (cur_state != priv->an_state)
@@ -1101,7 +1121,7 @@ static int amd_xgbe_an_init(struct phy_device *phydev)
        if (ret < 0)
                return ret;
 
-       if (phydev->supported & SUPPORTED_10000baseR_FEC)
+       if (phydev->advertising & SUPPORTED_10000baseR_FEC)
                ret |= 0xc000;
        else
                ret &= ~0xc000;
@@ -1113,13 +1133,13 @@ static int amd_xgbe_an_init(struct phy_device *phydev)
        if (ret < 0)
                return ret;
 
-       if (phydev->supported & SUPPORTED_10000baseKR_Full)
+       if (phydev->advertising & SUPPORTED_10000baseKR_Full)
                ret |= 0x80;
        else
                ret &= ~0x80;
 
-       if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
-           (phydev->supported & SUPPORTED_2500baseX_Full))
+       if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
+           (phydev->advertising & SUPPORTED_2500baseX_Full))
                ret |= 0x20;
        else
                ret &= ~0x20;
@@ -1131,12 +1151,12 @@ static int amd_xgbe_an_init(struct phy_device *phydev)
        if (ret < 0)
                return ret;
 
-       if (phydev->supported & SUPPORTED_Pause)
+       if (phydev->advertising & SUPPORTED_Pause)
                ret |= 0x400;
        else
                ret &= ~0x400;
 
-       if (phydev->supported & SUPPORTED_Asym_Pause)
+       if (phydev->advertising & SUPPORTED_Asym_Pause)
                ret |= 0x800;
        else
                ret &= ~0x800;
@@ -1212,38 +1232,14 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
                priv->an_irq_allocated = 1;
        }
 
-       ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
-       if (ret < 0)
-               return ret;
-       priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
-
-       /* Initialize supported features */
-       phydev->supported = SUPPORTED_Autoneg;
-       phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-       phydev->supported |= SUPPORTED_Backplane;
-       phydev->supported |= SUPPORTED_10000baseKR_Full;
-       switch (priv->speed_set) {
-       case AMD_XGBE_PHY_SPEEDSET_1000_10000:
-               phydev->supported |= SUPPORTED_1000baseKX_Full;
-               break;
-       case AMD_XGBE_PHY_SPEEDSET_2500_10000:
-               phydev->supported |= SUPPORTED_2500baseX_Full;
-               break;
-       }
-
-       if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
-               phydev->supported |= SUPPORTED_10000baseR_FEC;
-
-       phydev->advertising = phydev->supported;
-
        /* Set initial mode - call the mode setting routines
         * directly to insure we are properly configured
         */
-       if (phydev->supported & SUPPORTED_10000baseKR_Full)
+       if (phydev->advertising & SUPPORTED_10000baseKR_Full)
                ret = amd_xgbe_phy_xgmii_mode(phydev);
-       else if (phydev->supported & SUPPORTED_1000baseKX_Full)
+       else if (phydev->advertising & SUPPORTED_1000baseKX_Full)
                ret = amd_xgbe_phy_gmii_mode(phydev);
-       else if (phydev->supported & SUPPORTED_2500baseX_Full)
+       else if (phydev->advertising & SUPPORTED_2500baseX_Full)
                ret = amd_xgbe_phy_gmii_2500_mode(phydev);
        else
                ret = -EINVAL;
@@ -1315,10 +1311,10 @@ static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
        disable_irq(priv->an_irq);
 
        /* Start auto-negotiation in a supported mode */
-       if (phydev->supported & SUPPORTED_10000baseKR_Full)
+       if (phydev->advertising & SUPPORTED_10000baseKR_Full)
                ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
-       else if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
-                (phydev->supported & SUPPORTED_2500baseX_Full))
+       else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
+                (phydev->advertising & SUPPORTED_2500baseX_Full))
                ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
        else
                ret = -EINVAL;
@@ -1746,6 +1742,29 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
                       sizeof(priv->serdes_dfe_tap_ena));
        }
 
+       /* Initialize supported features */
+       phydev->supported = SUPPORTED_Autoneg;
+       phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+       phydev->supported |= SUPPORTED_Backplane;
+       phydev->supported |= SUPPORTED_10000baseKR_Full;
+       switch (priv->speed_set) {
+       case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+               phydev->supported |= SUPPORTED_1000baseKX_Full;
+               break;
+       case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+               phydev->supported |= SUPPORTED_2500baseX_Full;
+               break;
+       }
+
+       ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
+       if (ret < 0)
+               return ret;
+       priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
+       if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
+               phydev->supported |= SUPPORTED_10000baseR_FEC;
+
+       phydev->advertising = phydev->supported;
+
        phydev->priv = priv;
 
        if (!priv->adev || acpi_disabled)
@@ -1817,6 +1836,7 @@ static struct phy_driver amd_xgbe_phy_driver[] = {
                .phy_id_mask            = XGBE_PHY_MASK,
                .name                   = "AMD XGBE PHY",
                .features               = 0,
+               .flags                  = PHY_IS_INTERNAL,
                .probe                  = amd_xgbe_phy_probe,
                .remove                 = amd_xgbe_phy_remove,
                .soft_reset             = amd_xgbe_phy_soft_reset,
index f80e19ac67041a0e75778761373ec3ce2a982d95..fabf11d32d276d9ac6ef6ac50ccd24609a48f90f 100644 (file)
@@ -192,16 +192,17 @@ static int at803x_probe(struct phy_device *phydev)
 {
        struct device *dev = &phydev->dev;
        struct at803x_priv *priv;
+       struct gpio_desc *gpiod_reset;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
-       priv->gpiod_reset = devm_gpiod_get(dev, "reset");
-       if (IS_ERR(priv->gpiod_reset))
-               priv->gpiod_reset = NULL;
-       else
-               gpiod_direction_output(priv->gpiod_reset, 1);
+       gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(gpiod_reset))
+               return PTR_ERR(gpiod_reset);
+
+       priv->gpiod_reset = gpiod_reset;
 
        phydev->priv = priv;
 
index 974ec45152697a8dc6dfc93c0bb40b7852bae41c..64c74c6a482806bfc5d2bb4f821b4b1ef085adfd 100644 (file)
@@ -396,6 +396,7 @@ static struct phy_driver bcm7xxx_driver[] = {
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
+       BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
 {
        .phy_id         = PHY_ID_BCM7425,
index e22e602beef3426a600db641a66237f40c039732..a83f8e50844c432efdcc5a643d010a7129b8e51a 100644 (file)
@@ -257,7 +257,7 @@ static void ext_write(int broadcast, struct phy_device *phydev,
 
 /* Caller must hold extreg_lock. */
 static int tdr_write(int bc, struct phy_device *dev,
-                    const struct timespec *ts, u16 cmd)
+                    const struct timespec64 *ts, u16 cmd)
 {
        ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec & 0xffff);/* ns[15:0]  */
        ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec >> 16);   /* ns[31:16] */
@@ -411,12 +411,12 @@ static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
        struct dp83640_clock *clock =
                container_of(ptp, struct dp83640_clock, caps);
        struct phy_device *phydev = clock->chosen->phydev;
-       struct timespec ts;
+       struct timespec64 ts;
        int err;
 
        delta += ADJTIME_FIX;
 
-       ts = ns_to_timespec(delta);
+       ts = ns_to_timespec64(delta);
 
        mutex_lock(&clock->extreg_lock);
 
@@ -427,7 +427,8 @@ static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return err;
 }
 
-static int ptp_dp83640_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ptp_dp83640_gettime(struct ptp_clock_info *ptp,
+                              struct timespec64 *ts)
 {
        struct dp83640_clock *clock =
                container_of(ptp, struct dp83640_clock, caps);
@@ -452,7 +453,7 @@ static int ptp_dp83640_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 }
 
 static int ptp_dp83640_settime(struct ptp_clock_info *ptp,
-                              const struct timespec *ts)
+                              const struct timespec64 *ts)
 {
        struct dp83640_clock *clock =
                container_of(ptp, struct dp83640_clock, caps);
@@ -605,7 +606,7 @@ static void recalibrate(struct dp83640_clock *clock)
 {
        s64 now, diff;
        struct phy_txts event_ts;
-       struct timespec ts;
+       struct timespec64 ts;
        struct list_head *this;
        struct dp83640_private *tmp;
        struct phy_device *master = clock->chosen->phydev;
@@ -697,7 +698,7 @@ static void recalibrate(struct dp83640_clock *clock)
                diff = now - (s64) phy2txts(&event_ts);
                pr_info("slave offset %lld nanoseconds\n", diff);
                diff += ADJTIME_FIX;
-               ts = ns_to_timespec(diff);
+               ts = ns_to_timespec64(diff);
                tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
        }
 
@@ -998,8 +999,8 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
        clock->caps.pps         = 0;
        clock->caps.adjfreq     = ptp_dp83640_adjfreq;
        clock->caps.adjtime     = ptp_dp83640_adjtime;
-       clock->caps.gettime     = ptp_dp83640_gettime;
-       clock->caps.settime     = ptp_dp83640_settime;
+       clock->caps.gettime64   = ptp_dp83640_gettime;
+       clock->caps.settime64   = ptp_dp83640_settime;
        clock->caps.enable      = ptp_dp83640_enable;
        clock->caps.verify      = ptp_dp83640_verify;
        /*
index a08a3c78ba97b08fb503bace451a3d177fc04053..1960b46add65b3b89f122cc401c872050cebdbe4 100644 (file)
@@ -183,6 +183,35 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
 }
 EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
 
+int fixed_phy_update_state(struct phy_device *phydev,
+                          const struct fixed_phy_status *status,
+                          const struct fixed_phy_status *changed)
+{
+       struct fixed_mdio_bus *fmb = &platform_fmb;
+       struct fixed_phy *fp;
+
+       if (!phydev || !phydev->bus)
+               return -EINVAL;
+
+       list_for_each_entry(fp, &fmb->phys, node) {
+               if (fp->addr == phydev->addr) {
+#define _UPD(x) if (changed->x) \
+       fp->status.x = status->x
+                       _UPD(link);
+                       _UPD(speed);
+                       _UPD(duplex);
+                       _UPD(pause);
+                       _UPD(asym_pause);
+#undef _UPD
+                       fixed_phy_update_regs(fp);
+                       return 0;
+               }
+       }
+
+       return -ENOENT;
+}
+EXPORT_SYMBOL(fixed_phy_update_state);
+
 int fixed_phy_add(unsigned int irq, int phy_addr,
                  struct fixed_phy_status *status)
 {
index 6deac6d32f574dad07084faa6fae7285a21c053b..414fdf1f343fe61549fef390ac3e417d1bc928e4 100644 (file)
@@ -187,7 +187,7 @@ static int unimac_mdio_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id unimac_mdio_ids[] = {
+static const struct of_device_id unimac_mdio_ids[] = {
        { .compatible = "brcm,genet-mdio-v4", },
        { .compatible = "brcm,genet-mdio-v3", },
        { .compatible = "brcm,genet-mdio-v2", },
index 0a0578a592b811df94c3e55a8de1e1f3b4dd9d38..49ce7ece5af30c04c3e632a1719ba45d9b47f3b5 100644 (file)
@@ -249,7 +249,7 @@ static int mdio_gpio_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id mdio_gpio_of_match[] = {
+static const struct of_device_id mdio_gpio_of_match[] = {
        { .compatible = "virtual,mdio-gpio", },
        { /* sentinel */ }
 };
index 320eb15315c85bc16e8372704f1656e7a91c6404..1a87a585e74df9abac74d5a60c8715917b179ff9 100644 (file)
@@ -99,7 +99,7 @@ static int mdio_mux_gpio_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id mdio_mux_gpio_match[] = {
+static const struct of_device_id mdio_mux_gpio_match[] = {
        {
                .compatible = "mdio-mux-gpio",
        },
index 0aa985c7401434db1e51e9fc3c95cc9cd8220b38..2377c1341172f6ac6fb56eeaf835b7fc577e1593 100644 (file)
@@ -145,7 +145,7 @@ static int mdio_mux_mmioreg_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id mdio_mux_mmioreg_match[] = {
+static const struct of_device_id mdio_mux_mmioreg_match[] = {
        {
                .compatible = "mdio-mux-mmioreg",
        },
index c81052486edcd474df37cc871f7d794a04d68dc0..c838ad6155f7863cbed177f35c4416fad1c0ddb5 100644 (file)
@@ -252,7 +252,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id octeon_mdiobus_match[] = {
+static const struct of_device_id octeon_mdiobus_match[] = {
        {
                .compatible = "cavium,octeon-3860-mdio",
        },
index d2408a5e43a6a1fdef0ba85f5fb19b10ffca4743..ff059e1d8ac6c8c478625a55e54c4c41c945ad9a 100644 (file)
@@ -455,6 +455,18 @@ out:
        return NET_RX_DROP;
 }
 
+static void pppoe_unbind_sock_work(struct work_struct *work)
+{
+       struct pppox_sock *po = container_of(work, struct pppox_sock,
+                                            proto.pppoe.padt_work);
+       struct sock *sk = sk_pppox(po);
+
+       lock_sock(sk);
+       pppox_unbind_sock(sk);
+       release_sock(sk);
+       sock_put(sk);
+}
+
 /************************************************************************
  *
  * Receive a PPPoE Discovery frame.
@@ -500,7 +512,8 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
                }
 
                bh_unlock_sock(sk);
-               sock_put(sk);
+               if (!schedule_work(&po->proto.pppoe.padt_work))
+                       sock_put(sk);
        }
 
 abort:
@@ -613,6 +626,8 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
 
        lock_sock(sk);
 
+       INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work);
+
        error = -EINVAL;
        if (sp->sa_protocol != PX_PROTO_OE)
                goto end;
@@ -820,8 +835,8 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
        return err;
 }
 
-static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
-                 struct msghdr *m, size_t total_len)
+static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
+                        size_t total_len)
 {
        struct sk_buff *skb;
        struct sock *sk = sock->sk;
@@ -962,8 +977,8 @@ static const struct ppp_channel_ops pppoe_chan_ops = {
        .start_xmit = pppoe_xmit,
 };
 
-static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
-                 struct msghdr *m, size_t total_len, int flags)
+static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
+                        size_t total_len, int flags)
 {
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
index 1dc628ffce2b52a565354f060dca84467d67e4b9..e3bfbd4d01367fc32b063d3305b635006d3f9b03 100644 (file)
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        nf_reset(skb);
 
        skb->ip_summed = CHECKSUM_NONE;
-       ip_select_ident(skb, NULL);
+       ip_select_ident(sock_net(sk), skb, NULL);
        ip_send_check(iph);
 
        ip_local_out(skb);
index 7d394846afc214900f9e85baa2130f6f6121ac33..6928448f6b7f1a80f5cfd46eeee5992f3bc24cf7 100644 (file)
@@ -1935,6 +1935,9 @@ static netdev_features_t team_fix_features(struct net_device *dev,
                                                     mask);
        }
        rcu_read_unlock();
+
+       features = netdev_add_tso_features(features, mask);
+
        return features;
 }
 
@@ -1976,6 +1979,7 @@ static const struct net_device_ops team_netdev_ops = {
        .ndo_change_carrier     = team_change_carrier,
        .ndo_bridge_setlink     = ndo_dflt_netdev_switch_port_bridge_setlink,
        .ndo_bridge_dellink     = ndo_dflt_netdev_switch_port_bridge_dellink,
+       .ndo_features_check     = passthru_features_check,
 };
 
 /***********************
index 857dca47bf80eb9127e9e11d70c3cd681d114aae..e470ae59d40536fe7530774cb473ebe57000f9a8 100644 (file)
@@ -1448,8 +1448,7 @@ static void tun_sock_write_space(struct sock *sk)
        kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
 }
 
-static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
-                      struct msghdr *m, size_t total_len)
+static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 {
        int ret;
        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
@@ -1464,8 +1463,7 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
        return ret;
 }
 
-static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
-                      struct msghdr *m, size_t total_len,
+static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
                       int flags)
 {
        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
@@ -2225,8 +2223,6 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
 static const struct file_operations tun_fops = {
        .owner  = THIS_MODULE,
        .llseek = no_llseek,
-       .read  = new_sync_read,
-       .write = new_sync_write,
        .read_iter  = tun_chr_read_iter,
        .write_iter = tun_chr_write_iter,
        .poll   = tun_chr_poll,
index 8cfc3bb0c6a672a288784ab0dd5f09597265c39d..4e2b26a88b15f03ba8302d5d7a83d1a5b1a4d4ea 100644 (file)
@@ -641,7 +641,7 @@ static void catc_set_multicast_list(struct net_device *netdev)
        u8 broadcast[ETH_ALEN];
        u8 rx = RxEnable | RxPolarity | RxMultiCast;
 
-       memset(broadcast, 0xff, ETH_ALEN);
+       eth_broadcast_addr(broadcast);
        memset(catc->multicast, 0, 64);
 
        catc_multicast(broadcast, catc->multicast);
@@ -880,7 +880,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                
                dev_dbg(dev, "Filling the multicast list.\n");
          
-               memset(broadcast, 0xff, ETH_ALEN);
+               eth_broadcast_addr(broadcast);
                catc_multicast(broadcast, catc->multicast);
                catc_multicast(netdev->dev_addr, catc->multicast);
                catc_write_mem(catc, 0xfa80, catc->multicast, 64);
index 96fc8a5bde8416a471ed4cd07e657025bc222fab..e4b7a47a825c7f686e48992b23d3f1ee30555d71 100644 (file)
@@ -394,7 +394,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
        skb_put(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        eth_hdr(skb)->h_proto = proto;
-       memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+       eth_zero_addr(eth_hdr(skb)->h_source);
        memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
 
        /* add datagram */
index 1762ad3910b2e75b55db894afee887f055977b22..e221bfcee76b40a3ad7ba60ec4d348f4b8f4cc73 100644 (file)
@@ -172,7 +172,7 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
                if (!ret && link[0] == 1 && link[2] == 1)
                        break;
                msleep(500);
-       };
+       }
        if (!timeout) {
                dev_err(&udev->dev, "firmware not ready in time\n");
                return -ETIMEDOUT;
index 778e91531fac7f35480208ba35f6ae3e6c9ad5b2..111d907e0c117e8f8efb4b6c3b50607994f6c7b9 100644 (file)
@@ -1477,6 +1477,7 @@ static void tiocmget_intr_callback(struct urb *urb)
        struct uart_icount *icount;
        struct hso_serial_state_notification *serial_state_notification;
        struct usb_device *usb;
+       struct usb_interface *interface;
        int if_num;
 
        /* Sanity checks */
@@ -1494,7 +1495,9 @@ static void tiocmget_intr_callback(struct urb *urb)
        BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
 
        usb = serial->parent->usb;
-       if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
+       interface = serial->parent->interface;
+
+       if_num = interface->cur_altsetting->desc.bInterfaceNumber;
 
        /* wIndex should be the USB interface number of the port to which the
         * notification applies, which should always be the Modem port.
@@ -1675,6 +1678,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
        unsigned long flags;
        int if_num;
        struct hso_serial *serial = tty->driver_data;
+       struct usb_interface *interface;
 
        /* sanity check */
        if (!serial) {
@@ -1685,7 +1689,8 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
        if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM)
                return -EINVAL;
 
-       if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
+       interface = serial->parent->interface;
+       if_num = interface->cur_altsetting->desc.bInterfaceNumber;
 
        spin_lock_irqsave(&serial->serial_lock, flags);
        if (set & TIOCM_RTS)
@@ -2808,7 +2813,7 @@ static int hso_get_config_data(struct usb_interface *interface)
 {
        struct usb_device *usbdev = interface_to_usbdev(interface);
        u8 *config_data = kmalloc(17, GFP_KERNEL);
-       u32 if_num = interface->altsetting->desc.bInterfaceNumber;
+       u32 if_num = interface->cur_altsetting->desc.bInterfaceNumber;
        s32 result;
 
        if (!config_data)
@@ -2886,7 +2891,7 @@ static int hso_probe(struct usb_interface *interface,
                return -ENODEV;
        }
 
-       if_num = interface->altsetting->desc.bInterfaceNumber;
+       if_num = interface->cur_altsetting->desc.bInterfaceNumber;
 
        /* Get the interface/port specification from either driver_info or from
         * the device itself */
index 8f37efd2d2fbb3ec05fcde896b8b9e88136c2f35..5714107533bb4292b94c85d064726ac4f89f496b 100644 (file)
@@ -201,7 +201,7 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                                        &buf->data[sizeof(*ethhdr) + 0x12],
                                        ETH_ALEN);
                } else {
-                       memset(ethhdr->h_source, 0, ETH_ALEN);
+                       eth_zero_addr(ethhdr->h_source);
                        memcpy(ethhdr->h_dest, dev->net->dev_addr, ETH_ALEN);
 
                        /* Inbound IPv6 packets have an IPv4 ethertype (0x800)
index 602dc6668c3af7ce9f6cc4ddd61437ba2f6adf29..f603f362504bce0c1cb2656e1d29232eb05db846 100644 (file)
@@ -108,7 +108,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        eth_hdr(skb)->h_proto = proto;
-       memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+       eth_zero_addr(eth_hdr(skb)->h_source);
 fix_dest:
        memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
        return 1;
index 9f7c0ab3b3490b947d161428406612c80dc29360..ac4d03b328b130ab918175b1fa5c8fe55a0cbc7b 100644 (file)
 #define USB_TX_AGG             0xd40a
 #define USB_RX_BUF_TH          0xd40c
 #define USB_USB_TIMER          0xd428
-#define USB_RX_EARLY_AGG       0xd42c
+#define USB_RX_EARLY_TIMEOUT   0xd42c
+#define USB_RX_EARLY_SIZE      0xd42e
 #define USB_PM_CTRL_STATUS     0xd432
 #define USB_TX_DMA             0xd434
 #define USB_TOLERANCE          0xd490
 /* USB_MISC_0 */
 #define PCUT_STATUS            0x0001
 
-/* USB_RX_EARLY_AGG */
-#define EARLY_AGG_SUPPER       0x0e832981
-#define EARLY_AGG_HIGH         0x0e837a12
-#define EARLY_AGG_SLOW         0x0e83ffff
+/* USB_RX_EARLY_TIMEOUT */
+#define COALESCE_SUPER          85000U
+#define COALESCE_HIGH          250000U
+#define COALESCE_SLOW          524280U
 
 /* USB_WDT11_CTRL */
 #define TIMER11_EN             0x0001
@@ -607,6 +608,7 @@ struct r8152 {
        u32 saved_wolopts;
        u32 msg_enable;
        u32 tx_qlen;
+       u32 coalesce;
        u16 ocp_base;
        u8 *intr_buff;
        u8 version;
@@ -2143,28 +2145,19 @@ static int rtl8152_enable(struct r8152 *tp)
        return rtl_enable(tp);
 }
 
-static void r8153_set_rx_agg(struct r8152 *tp)
+static void r8153_set_rx_early_timeout(struct r8152 *tp)
 {
-       u8 speed;
+       u32 ocp_data = tp->coalesce / 8;
 
-       speed = rtl8152_get_speed(tp);
-       if (speed & _1000bps) {
-               if (tp->udev->speed == USB_SPEED_SUPER) {
-                       ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
-                                       RX_THR_SUPPER);
-                       ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
-                                       EARLY_AGG_SUPPER);
-               } else {
-                       ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
-                                       RX_THR_HIGH);
-                       ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
-                                       EARLY_AGG_HIGH);
-               }
-       } else {
-               ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_SLOW);
-               ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
-                               EARLY_AGG_SLOW);
-       }
+       ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, ocp_data);
+}
+
+static void r8153_set_rx_early_size(struct r8152 *tp)
+{
+       u32 mtu = tp->netdev->mtu;
+       u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4;
+
+       ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
 }
 
 static int rtl8153_enable(struct r8152 *tp)
@@ -2174,7 +2167,8 @@ static int rtl8153_enable(struct r8152 *tp)
 
        set_tx_qlen(tp);
        rtl_set_eee_plus(tp);
-       r8153_set_rx_agg(tp);
+       r8153_set_rx_early_timeout(tp);
+       r8153_set_rx_early_size(tp);
 
        return rtl_enable(tp);
 }
@@ -3720,6 +3714,61 @@ out:
        return ret;
 }
 
+static int rtl8152_get_coalesce(struct net_device *netdev,
+                               struct ethtool_coalesce *coalesce)
+{
+       struct r8152 *tp = netdev_priv(netdev);
+
+       switch (tp->version) {
+       case RTL_VER_01:
+       case RTL_VER_02:
+               return -EOPNOTSUPP;
+       default:
+               break;
+       }
+
+       coalesce->rx_coalesce_usecs = tp->coalesce;
+
+       return 0;
+}
+
+static int rtl8152_set_coalesce(struct net_device *netdev,
+                               struct ethtool_coalesce *coalesce)
+{
+       struct r8152 *tp = netdev_priv(netdev);
+       int ret;
+
+       switch (tp->version) {
+       case RTL_VER_01:
+       case RTL_VER_02:
+               return -EOPNOTSUPP;
+       default:
+               break;
+       }
+
+       if (coalesce->rx_coalesce_usecs > COALESCE_SLOW)
+               return -EINVAL;
+
+       ret = usb_autopm_get_interface(tp->intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&tp->control);
+
+       if (tp->coalesce != coalesce->rx_coalesce_usecs) {
+               tp->coalesce = coalesce->rx_coalesce_usecs;
+
+               if (netif_running(tp->netdev) && netif_carrier_ok(netdev))
+                       r8153_set_rx_early_timeout(tp);
+       }
+
+       mutex_unlock(&tp->control);
+
+       usb_autopm_put_interface(tp->intf);
+
+       return ret;
+}
+
 static struct ethtool_ops ops = {
        .get_drvinfo = rtl8152_get_drvinfo,
        .get_settings = rtl8152_get_settings,
@@ -3733,6 +3782,8 @@ static struct ethtool_ops ops = {
        .get_strings = rtl8152_get_strings,
        .get_sset_count = rtl8152_get_sset_count,
        .get_ethtool_stats = rtl8152_get_ethtool_stats,
+       .get_coalesce = rtl8152_get_coalesce,
+       .set_coalesce = rtl8152_set_coalesce,
        .get_eee = rtl_ethtool_get_eee,
        .set_eee = rtl_ethtool_set_eee,
 };
@@ -3784,6 +3835,7 @@ out:
 static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct r8152 *tp = netdev_priv(dev);
+       int ret;
 
        switch (tp->version) {
        case RTL_VER_01:
@@ -3796,9 +3848,22 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
        if (new_mtu < 68 || new_mtu > RTL8153_MAX_MTU)
                return -EINVAL;
 
+       ret = usb_autopm_get_interface(tp->intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&tp->control);
+
        dev->mtu = new_mtu;
 
-       return 0;
+       if (netif_running(dev) && netif_carrier_ok(dev))
+               r8153_set_rx_early_size(tp);
+
+       mutex_unlock(&tp->control);
+
+       usb_autopm_put_interface(tp->intf);
+
+       return ret;
 }
 
 static const struct net_device_ops rtl8152_netdev_ops = {
@@ -3967,6 +4032,18 @@ static int rtl8152_probe(struct usb_interface *intf,
        tp->mii.reg_num_mask = 0x1f;
        tp->mii.phy_id = R8152_PHY_ID;
 
+       switch (udev->speed) {
+       case USB_SPEED_SUPER:
+               tp->coalesce = COALESCE_SUPER;
+               break;
+       case USB_SPEED_HIGH:
+               tp->coalesce = COALESCE_HIGH;
+               break;
+       default:
+               tp->coalesce = COALESCE_SLOW;
+               break;
+       }
+
        intf->needs_remote_wakeup = 1;
 
        tp->rtl_ops.init(tp);
index 4cca36ebc4fb194a22af440f11047ecc0ce7fd1e..c8186ffda1a314f08cb71b6688c5b120e57c1717 100644 (file)
@@ -263,6 +263,20 @@ static void veth_poll_controller(struct net_device *dev)
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
+static int veth_get_iflink(const struct net_device *dev)
+{
+       struct veth_priv *priv = netdev_priv(dev);
+       struct net_device *peer;
+       int iflink;
+
+       rcu_read_lock();
+       peer = rcu_dereference(priv->peer);
+       iflink = peer ? peer->ifindex : 0;
+       rcu_read_unlock();
+
+       return iflink;
+}
+
 static const struct net_device_ops veth_netdev_ops = {
        .ndo_init            = veth_dev_init,
        .ndo_open            = veth_open,
@@ -275,6 +289,7 @@ static const struct net_device_ops veth_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = veth_poll_controller,
 #endif
+       .ndo_get_iflink         = veth_get_iflink,
 };
 
 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |    \
index 59b0e9754ae39cbc38812d407688f66f5e79b539..63c7810e1545a357eda7578af862ed18322de933 100644 (file)
@@ -749,9 +749,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 {
        struct receive_queue *rq =
                container_of(napi, struct receive_queue, napi);
-       unsigned int r, received = 0;
+       unsigned int r, received;
 
-       received += virtnet_receive(rq, budget - received);
+       received = virtnet_receive(rq, budget);
 
        /* Out of packets? */
        if (received < budget) {
@@ -939,8 +939,16 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_orphan(skb);
        nf_reset(skb);
 
-       /* Apparently nice girls don't return TX_BUSY; stop the queue
-        * before it gets out of hand.  Naturally, this wastes entries. */
+       /* If running out of space, stop queue to avoid getting packets that we
+        * are then unable to transmit.
+        * An alternative would be to force queuing layer to requeue the skb by
+        * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
+        * returned in a normal path of operation: it means that driver is not
+        * maintaining the TX queue stop/start state properly, and causes
+        * the stack to do a non-trivial amount of useless work.
+        * Since most packets only take 1 or 2 ring slots, stopping the queue
+        * early means 16 slots are typically wasted.
+        */
        if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
                netif_stop_subqueue(dev, qnum);
                if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
index 294214c152927a30564bf83d9204f3351b64f237..61c0840c448c05e3e7fc340e4b1d19370db3363c 100644 (file)
@@ -819,6 +819,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                           struct vmxnet3_adapter *adapter)
 {
        struct Vmxnet3_TxDataDesc *tdd;
+       u8 protocol = 0;
 
        if (ctx->mss) { /* TSO */
                ctx->eth_ip_hdr_size = skb_transport_offset(skb);
@@ -831,16 +832,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                        if (ctx->ipv4) {
                                const struct iphdr *iph = ip_hdr(skb);
 
-                               if (iph->protocol == IPPROTO_TCP)
-                                       ctx->l4_hdr_size = tcp_hdrlen(skb);
-                               else if (iph->protocol == IPPROTO_UDP)
-                                       ctx->l4_hdr_size = sizeof(struct udphdr);
-                               else
-                                       ctx->l4_hdr_size = 0;
-                       } else {
-                               /* for simplicity, don't copy L4 headers */
+                               protocol = iph->protocol;
+                       } else if (ctx->ipv6) {
+                               const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+                               protocol = ipv6h->nexthdr;
+                       }
+
+                       switch (protocol) {
+                       case IPPROTO_TCP:
+                               ctx->l4_hdr_size = tcp_hdrlen(skb);
+                               break;
+                       case IPPROTO_UDP:
+                               ctx->l4_hdr_size = sizeof(struct udphdr);
+                               break;
+                       default:
                                ctx->l4_hdr_size = 0;
+                               break;
                        }
+
                        ctx->copy_size = min(ctx->eth_ip_hdr_size +
                                         ctx->l4_hdr_size, skb->len);
                } else {
@@ -887,7 +897,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
                                                 IPPROTO_TCP, 0);
-       } else {
+       } else if (ctx->ipv6) {
                struct ipv6hdr *iph = ipv6_hdr(skb);
 
                tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
@@ -938,6 +948,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
        count = txd_estimate(skb);
 
        ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
+       ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
 
        ctx.mss = skb_shinfo(skb)->gso_size;
        if (ctx.mss) {
index 4c8a944d58b41f0e42c1d9e34acb83a275c34b1d..c1d0e7a9da04c36c58c43b6c8c432a1d8a8720f1 100644 (file)
@@ -104,7 +104,7 @@ vmxnet3_rq_driver_stats[] = {
                                          rx_buf_alloc_failure) },
 };
 
-/* gloabl stats maintained by the driver */
+/* global stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_global_stats[] = {
        /* description,         offset */
@@ -272,7 +272,7 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
                        adapter->shared->devRead.misc.uptFeatures &=
                        ~UPT1_F_RXCSUM;
 
-               /* update harware LRO capability accordingly */
+               /* update hardware LRO capability accordingly */
                if (features & NETIF_F_LRO)
                        adapter->shared->devRead.misc.uptFeatures |=
                                                        UPT1_F_LRO;
index cd71c77f78f2f5aeaa7165cbe83e71c0e25587c3..6bb769ae7de94ca5b0bbe0a83b0fcb83223f5081 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.3.4.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.3.5.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01030400
+#define VMXNET3_DRIVER_VERSION_NUM      0x01030500
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
@@ -211,6 +211,7 @@ struct vmxnet3_tq_driver_stats {
 
 struct vmxnet3_tx_ctx {
        bool   ipv4;
+       bool   ipv6;
        u16 mss;
        u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum
                                 * offloading
index f8528a4cf54f2b0bc78b78bd705a82577530bdeb..b5fecb49a0c6aef28e4cf886abae837ca5e52b40 100644 (file)
@@ -127,10 +127,6 @@ struct vxlan_dev {
        __u8              ttl;
        u32               flags;        /* VXLAN_F_* in vxlan.h */
 
-       struct work_struct sock_work;
-       struct work_struct igmp_join;
-       struct work_struct igmp_leave;
-
        unsigned long     age_interval;
        struct timer_list age_timer;
        spinlock_t        hash_lock;
@@ -144,58 +140,56 @@ struct vxlan_dev {
 static u32 vxlan_salt __read_mostly;
 static struct workqueue_struct *vxlan_wq;
 
-static void vxlan_sock_work(struct work_struct *work);
-
 #if IS_ENABLED(CONFIG_IPV6)
 static inline
 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
 {
-       if (a->sa.sa_family != b->sa.sa_family)
-               return false;
-       if (a->sa.sa_family == AF_INET6)
-               return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
-       else
-               return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+       if (a->sa.sa_family != b->sa.sa_family)
+               return false;
+       if (a->sa.sa_family == AF_INET6)
+               return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
+       else
+               return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
 }
 
 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
 {
-       if (ipa->sa.sa_family == AF_INET6)
-               return ipv6_addr_any(&ipa->sin6.sin6_addr);
-       else
-               return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+       if (ipa->sa.sa_family == AF_INET6)
+               return ipv6_addr_any(&ipa->sin6.sin6_addr);
+       else
+               return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
 }
 
 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
 {
-       if (ipa->sa.sa_family == AF_INET6)
-               return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
-       else
-               return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+       if (ipa->sa.sa_family == AF_INET6)
+               return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
+       else
+               return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
 }
 
 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
 {
-       if (nla_len(nla) >= sizeof(struct in6_addr)) {
-               nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr));
-               ip->sa.sa_family = AF_INET6;
-               return 0;
-       } else if (nla_len(nla) >= sizeof(__be32)) {
-               ip->sin.sin_addr.s_addr = nla_get_be32(nla);
-               ip->sa.sa_family = AF_INET;
-               return 0;
-       } else {
-               return -EAFNOSUPPORT;
-       }
+       if (nla_len(nla) >= sizeof(struct in6_addr)) {
+               ip->sin6.sin6_addr = nla_get_in6_addr(nla);
+               ip->sa.sa_family = AF_INET6;
+               return 0;
+       } else if (nla_len(nla) >= sizeof(__be32)) {
+               ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
+               ip->sa.sa_family = AF_INET;
+               return 0;
+       } else {
+               return -EAFNOSUPPORT;
+       }
 }
 
 static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
-                             const union vxlan_addr *ip)
+                             const union vxlan_addr *ip)
 {
-       if (ip->sa.sa_family == AF_INET6)
-               return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr);
-       else
-               return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
+       if (ip->sa.sa_family == AF_INET6)
+               return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
+       else
+               return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
 }
 
 #else /* !CONFIG_IPV6 */
@@ -203,36 +197,36 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
 static inline
 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
 {
-       return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+       return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
 }
 
 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
 {
-       return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+       return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
 }
 
 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
 {
-       return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+       return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
 }
 
 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
 {
-       if (nla_len(nla) >= sizeof(struct in6_addr)) {
-               return -EAFNOSUPPORT;
-       } else if (nla_len(nla) >= sizeof(__be32)) {
-               ip->sin.sin_addr.s_addr = nla_get_be32(nla);
-               ip->sa.sa_family = AF_INET;
-               return 0;
-       } else {
-               return -EAFNOSUPPORT;
-       }
+       if (nla_len(nla) >= sizeof(struct in6_addr)) {
+               return -EAFNOSUPPORT;
+       } else if (nla_len(nla) >= sizeof(__be32)) {
+               ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
+               ip->sa.sa_family = AF_INET;
+               return 0;
+       } else {
+               return -EAFNOSUPPORT;
+       }
 }
 
 static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
-                             const union vxlan_addr *ip)
+                             const union vxlan_addr *ip)
 {
-       return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
+       return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
 }
 #endif
 
@@ -995,7 +989,7 @@ out:
 
 /* Watch incoming packets to learn mapping between Ethernet address
  * and Tunnel endpoint.
- * Return true if packet is bogus and should be droppped.
+ * Return true if packet is bogus and should be dropped.
  */
 static bool vxlan_snoop(struct net_device *dev,
                        union vxlan_addr *src_ip, const u8 *src_mac)
@@ -1072,11 +1066,6 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
        return false;
 }
 
-static void vxlan_sock_hold(struct vxlan_sock *vs)
-{
-       atomic_inc(&vs->refcnt);
-}
-
 void vxlan_sock_release(struct vxlan_sock *vs)
 {
        struct sock *sk = vs->sock->sk;
@@ -1095,17 +1084,16 @@ void vxlan_sock_release(struct vxlan_sock *vs)
 }
 EXPORT_SYMBOL_GPL(vxlan_sock_release);
 
-/* Callback to update multicast group membership when first VNI on
- * multicast asddress is brought up
- * Done as workqueue because ip_mc_join_group acquires RTNL.
+/* Update multicast group membership when first VNI on
+ * multicast address is brought up
  */
-static void vxlan_igmp_join(struct work_struct *work)
+static int vxlan_igmp_join(struct vxlan_dev *vxlan)
 {
-       struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
        struct vxlan_sock *vs = vxlan->vn_sock;
        struct sock *sk = vs->sock->sk;
        union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
        int ifindex = vxlan->default_dst.remote_ifindex;
+       int ret = -EINVAL;
 
        lock_sock(sk);
        if (ip->sa.sa_family == AF_INET) {
@@ -1114,27 +1102,26 @@ static void vxlan_igmp_join(struct work_struct *work)
                        .imr_ifindex            = ifindex,
                };
 
-               ip_mc_join_group(sk, &mreq);
+               ret = ip_mc_join_group(sk, &mreq);
 #if IS_ENABLED(CONFIG_IPV6)
        } else {
-               ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
-                                            &ip->sin6.sin6_addr);
+               ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
+                                                  &ip->sin6.sin6_addr);
 #endif
        }
        release_sock(sk);
 
-       vxlan_sock_release(vs);
-       dev_put(vxlan->dev);
+       return ret;
 }
 
 /* Inverse of vxlan_igmp_join when last VNI is brought down */
-static void vxlan_igmp_leave(struct work_struct *work)
+static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
 {
-       struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
        struct vxlan_sock *vs = vxlan->vn_sock;
        struct sock *sk = vs->sock->sk;
        union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
        int ifindex = vxlan->default_dst.remote_ifindex;
+       int ret = -EINVAL;
 
        lock_sock(sk);
        if (ip->sa.sa_family == AF_INET) {
@@ -1143,18 +1130,16 @@ static void vxlan_igmp_leave(struct work_struct *work)
                        .imr_ifindex            = ifindex,
                };
 
-               ip_mc_leave_group(sk, &mreq);
+               ret = ip_mc_leave_group(sk, &mreq);
 #if IS_ENABLED(CONFIG_IPV6)
        } else {
-               ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
-                                            &ip->sin6.sin6_addr);
+               ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
+                                                  &ip->sin6.sin6_addr);
 #endif
        }
-
        release_sock(sk);
 
-       vxlan_sock_release(vs);
-       dev_put(vxlan->dev);
+       return ret;
 }
 
 static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
@@ -1244,7 +1229,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                 * this as a malformed packet. This behavior diverges from
                 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
                 * in reserved fields are to be ignored. The approach here
-                * maintains compatbility with previous stack code, and also
+                * maintains compatibility with previous stack code, and also
                 * is more robust and provides a little more security in
                 * adding extensions to VXLAN.
                 */
@@ -2175,37 +2160,22 @@ static void vxlan_cleanup(unsigned long arg)
 
 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
 {
+       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
        __u32 vni = vxlan->default_dst.remote_vni;
 
        vxlan->vn_sock = vs;
+       spin_lock(&vn->sock_lock);
        hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+       spin_unlock(&vn->sock_lock);
 }
 
 /* Setup stats when device is created */
 static int vxlan_init(struct net_device *dev)
 {
-       struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
-       struct vxlan_sock *vs;
-       bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
-
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
-       spin_lock(&vn->sock_lock);
-       vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
-                            vxlan->dst_port, vxlan->flags);
-       if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
-               /* If we have a socket with same port already, reuse it */
-               vxlan_vs_add_dev(vs, vxlan);
-       } else {
-               /* otherwise make new socket outside of RTNL */
-               dev_hold(dev);
-               queue_work(vxlan_wq, &vxlan->sock_work);
-       }
-       spin_unlock(&vn->sock_lock);
-
        return 0;
 }
 
@@ -2223,12 +2193,9 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
 static void vxlan_uninit(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_sock *vs = vxlan->vn_sock;
 
        vxlan_fdb_delete_default(vxlan);
 
-       if (vs)
-               vxlan_sock_release(vs);
        free_percpu(dev->tstats);
 }
 
@@ -2236,22 +2203,28 @@ static void vxlan_uninit(struct net_device *dev)
 static int vxlan_open(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_sock *vs = vxlan->vn_sock;
+       struct vxlan_sock *vs;
+       int ret = 0;
 
-       /* socket hasn't been created */
-       if (!vs)
-               return -ENOTCONN;
+       vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL,
+                           false, vxlan->flags);
+       if (IS_ERR(vs))
+               return PTR_ERR(vs);
+
+       vxlan_vs_add_dev(vs, vxlan);
 
        if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
-               vxlan_sock_hold(vs);
-               dev_hold(dev);
-               queue_work(vxlan_wq, &vxlan->igmp_join);
+               ret = vxlan_igmp_join(vxlan);
+               if (ret) {
+                       vxlan_sock_release(vs);
+                       return ret;
+               }
        }
 
        if (vxlan->age_interval)
                mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
 
-       return 0;
+       return ret;
 }
 
 /* Purge the forwarding table */
@@ -2279,19 +2252,21 @@ static int vxlan_stop(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
        struct vxlan_sock *vs = vxlan->vn_sock;
+       int ret = 0;
 
-       if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
+       if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
            !vxlan_group_used(vn, vxlan)) {
-               vxlan_sock_hold(vs);
-               dev_hold(dev);
-               queue_work(vxlan_wq, &vxlan->igmp_leave);
+               ret = vxlan_igmp_leave(vxlan);
+               if (ret)
+                       return ret;
        }
 
        del_timer_sync(&vxlan->age_timer);
 
        vxlan_flush(vxlan);
+       vxlan_sock_release(vs);
 
-       return 0;
+       return ret;
 }
 
 /* Stub, nothing needs to be done. */
@@ -2402,9 +2377,6 @@ static void vxlan_setup(struct net_device *dev)
 
        INIT_LIST_HEAD(&vxlan->next);
        spin_lock_init(&vxlan->hash_lock);
-       INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
-       INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
-       INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
 
        init_timer_deferrable(&vxlan->age_timer);
        vxlan->age_timer.function = vxlan_cleanup;
@@ -2516,7 +2488,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
                    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
        } else {
                udp_conf.family = AF_INET;
-               udp_conf.local_ip.s_addr = INADDR_ANY;
        }
 
        udp_conf.local_udp_port = port;
@@ -2552,6 +2523,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
 
        sock = vxlan_create_sock(net, ipv6, port, flags);
        if (IS_ERR(sock)) {
+               pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
+                       PTR_ERR(sock));
                kfree(vs);
                return ERR_CAST(sock);
        }
@@ -2591,45 +2564,23 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
        struct vxlan_sock *vs;
        bool ipv6 = flags & VXLAN_F_IPV6;
 
-       vs = vxlan_socket_create(net, port, rcv, data, flags);
-       if (!IS_ERR(vs))
-               return vs;
-
-       if (no_share)   /* Return error if sharing is not allowed. */
-               return vs;
-
-       spin_lock(&vn->sock_lock);
-       vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, flags);
-       if (vs && ((vs->rcv != rcv) ||
-                  !atomic_add_unless(&vs->refcnt, 1, 0)))
-                       vs = ERR_PTR(-EBUSY);
-       spin_unlock(&vn->sock_lock);
-
-       if (!vs)
-               vs = ERR_PTR(-EINVAL);
+       if (!no_share) {
+               spin_lock(&vn->sock_lock);
+               vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
+                                    flags);
+               if (vs && vs->rcv == rcv) {
+                       if (!atomic_add_unless(&vs->refcnt, 1, 0))
+                               vs = ERR_PTR(-EBUSY);
+                       spin_unlock(&vn->sock_lock);
+                       return vs;
+               }
+               spin_unlock(&vn->sock_lock);
+       }
 
-       return vs;
+       return vxlan_socket_create(net, port, rcv, data, flags);
 }
 EXPORT_SYMBOL_GPL(vxlan_sock_add);
 
-/* Scheduled at device creation to bind to a socket */
-static void vxlan_sock_work(struct work_struct *work)
-{
-       struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
-       struct net *net = vxlan->net;
-       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
-       __be16 port = vxlan->dst_port;
-       struct vxlan_sock *nvs;
-
-       nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags);
-       spin_lock(&vn->sock_lock);
-       if (!IS_ERR(nvs))
-               vxlan_vs_add_dev(nvs, vxlan);
-       spin_unlock(&vn->sock_lock);
-
-       dev_put(vxlan->dev);
-}
-
 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
@@ -2651,27 +2602,25 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        /* Unless IPv6 is explicitly requested, assume IPv4 */
        dst->remote_ip.sa.sa_family = AF_INET;
        if (data[IFLA_VXLAN_GROUP]) {
-               dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+               dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
        } else if (data[IFLA_VXLAN_GROUP6]) {
                if (!IS_ENABLED(CONFIG_IPV6))
                        return -EPFNOSUPPORT;
 
-               nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6],
-                          sizeof(struct in6_addr));
+               dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
                dst->remote_ip.sa.sa_family = AF_INET6;
                use_ipv6 = true;
        }
 
        if (data[IFLA_VXLAN_LOCAL]) {
-               vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
+               vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
                vxlan->saddr.sa.sa_family = AF_INET;
        } else if (data[IFLA_VXLAN_LOCAL6]) {
                if (!IS_ENABLED(CONFIG_IPV6))
                        return -EPFNOSUPPORT;
 
                /* TODO: respect scope id */
-               nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6],
-                          sizeof(struct in6_addr));
+               vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
                vxlan->saddr.sa.sa_family = AF_INET6;
                use_ipv6 = true;
        }
@@ -2856,13 +2805,13 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
        if (!vxlan_addr_any(&dst->remote_ip)) {
                if (dst->remote_ip.sa.sa_family == AF_INET) {
-                       if (nla_put_be32(skb, IFLA_VXLAN_GROUP,
-                                        dst->remote_ip.sin.sin_addr.s_addr))
+                       if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
+                                           dst->remote_ip.sin.sin_addr.s_addr))
                                goto nla_put_failure;
 #if IS_ENABLED(CONFIG_IPV6)
                } else {
-                       if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr),
-                                   &dst->remote_ip.sin6.sin6_addr))
+                       if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
+                                            &dst->remote_ip.sin6.sin6_addr))
                                goto nla_put_failure;
 #endif
                }
@@ -2873,13 +2822,13 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
        if (!vxlan_addr_any(&vxlan->saddr)) {
                if (vxlan->saddr.sa.sa_family == AF_INET) {
-                       if (nla_put_be32(skb, IFLA_VXLAN_LOCAL,
-                                        vxlan->saddr.sin.sin_addr.s_addr))
+                       if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
+                                           vxlan->saddr.sin.sin_addr.s_addr))
                                goto nla_put_failure;
 #if IS_ENABLED(CONFIG_IPV6)
                } else {
-                       if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr),
-                                   &vxlan->saddr.sin6.sin6_addr))
+                       if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
+                                            &vxlan->saddr.sin6.sin6_addr))
                                goto nla_put_failure;
 #endif
                }
index e71a2ce7a4487a5386331e26023e5538512f14bd..627443283e1d7ab9022cda91c4acd07deff6f84f 100644 (file)
@@ -2676,7 +2676,7 @@ static void wifi_setup(struct net_device *dev)
        dev->addr_len           = ETH_ALEN;
        dev->tx_queue_len       = 100; 
 
-       memset(dev->broadcast,0xFF, ETH_ALEN);
+       eth_broadcast_addr(dev->broadcast);
 
        dev->flags              = IFF_BROADCAST|IFF_MULTICAST;
 }
@@ -3273,7 +3273,7 @@ static void airo_handle_link(struct airo_info *ai)
                }
 
                /* Send event to user space */
-               memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
                wrqu.ap_addr.sa_family = ARPHRD_ETHER;
                wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL);
        }
index da92bfa76b7cf1d37e9ea819edf00c59d84e350b..49219c5089639574a471be4befa951da71e620c3 100644 (file)
@@ -1166,7 +1166,7 @@ static int at76_start_monitor(struct at76_priv *priv)
        int ret;
 
        memset(&scan, 0, sizeof(struct at76_req_scan));
-       memset(scan.bssid, 0xff, ETH_ALEN);
+       eth_broadcast_addr(scan.bssid);
 
        scan.channel = priv->channel;
        scan.scan_type = SCAN_TYPE_PASSIVE;
@@ -1427,7 +1427,7 @@ static int at76_startup_device(struct at76_priv *priv)
        at76_wait_completion(priv, CMD_STARTUP);
 
        /* remove BSSID from previous run */
-       memset(priv->bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->bssid);
 
        priv->scanning = false;
 
@@ -1973,7 +1973,7 @@ static int at76_hw_scan(struct ieee80211_hw *hw,
        ieee80211_stop_queues(hw);
 
        memset(&scan, 0, sizeof(struct at76_req_scan));
-       memset(scan.bssid, 0xFF, ETH_ALEN);
+       eth_broadcast_addr(scan.bssid);
 
        if (req->n_ssids) {
                scan.scan_type = SCAN_TYPE_ACTIVE;
index f92050617ae682e02bb48b6676a16298ae2dfa4f..5147ebe4cd05d13d12db78aacbd3081fe83a9727 100644 (file)
@@ -779,8 +779,6 @@ static void ar5523_tx(struct ieee80211_hw *hw,
                ieee80211_stop_queues(hw);
        }
 
-       data->skb = skb;
-
        spin_lock_irqsave(&ar->tx_data_list_lock, flags);
        list_add_tail(&data->list, &ar->tx_queue_pending);
        spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
@@ -817,10 +815,13 @@ static void ar5523_tx_work_locked(struct ar5523 *ar)
                if (!data)
                        break;
 
-               skb = data->skb;
+               txi = container_of((void *)data, struct ieee80211_tx_info,
+                                  driver_data);
                txqid = 0;
-               txi = IEEE80211_SKB_CB(skb);
+
+               skb = container_of((void *)txi, struct sk_buff, cb);
                paylen = skb->len;
+
                urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!urb) {
                        ar5523_err(ar, "Failed to allocate TX urb\n");
index 00c6fd346d48225e92a865ff8938a1c9f921e6aa..9a322a65cdb548180a75940cb3997989226768e4 100644 (file)
@@ -74,7 +74,6 @@ struct ar5523_tx_cmd {
 struct ar5523_tx_data {
        struct list_head        list;
        struct ar5523           *ar;
-       struct sk_buff          *skb;
        struct urb              *urb;
 };
 
index 1eebe2ea3dfb0b3d67ff387d2568c5aa434cdc7c..7e9481099a8e6ba9dd06e13a6f60559993956516 100644 (file)
@@ -131,6 +131,9 @@ struct ath_ops {
        void (*enable_write_buffer)(void *);
        void (*write_flush) (void *);
        u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
+       void (*enable_rmw_buffer)(void *);
+       void (*rmw_flush) (void *);
+
 };
 
 struct ath_common;
index c18647b87f71dd6879a4d3ea39b22ad653d5a449..0eddb204d85bb9b08dcb84f55530b25fb3b901e8 100644 (file)
@@ -39,7 +39,7 @@ struct ath10k_ce_pipe;
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
 #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
-#define CE_DESC_FLAGS_META_DATA_LSB  3
+#define CE_DESC_FLAGS_META_DATA_LSB  2
 
 struct ce_desc {
        __le32 addr;
index 310e12bc078a6e47dd9f52637db92c1efed6d227..c0e454bb6a8df646b9266afbffa76ed179dff0fc 100644 (file)
@@ -436,16 +436,16 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
 
 static void ath10k_core_free_firmware_files(struct ath10k *ar)
 {
-       if (ar->board && !IS_ERR(ar->board))
+       if (!IS_ERR(ar->board))
                release_firmware(ar->board);
 
-       if (ar->otp && !IS_ERR(ar->otp))
+       if (!IS_ERR(ar->otp))
                release_firmware(ar->otp);
 
-       if (ar->firmware && !IS_ERR(ar->firmware))
+       if (!IS_ERR(ar->firmware))
                release_firmware(ar->firmware);
 
-       if (ar->cal_file && !IS_ERR(ar->cal_file))
+       if (!IS_ERR(ar->cal_file))
                release_firmware(ar->cal_file);
 
        ar->board = NULL;
index d60e46fe6d19ccc475a173df2fada3ea735d69d4..f65310c3ba5fe8d660cd4139f93407a22045b8aa 100644 (file)
@@ -159,6 +159,25 @@ struct ath10k_fw_stats_peer {
        u32 peer_rx_rate; /* 10x only */
 };
 
+struct ath10k_fw_stats_vdev {
+       struct list_head list;
+
+       u32 vdev_id;
+       u32 beacon_snr;
+       u32 data_snr;
+       u32 num_tx_frames[4];
+       u32 num_rx_frames;
+       u32 num_tx_frames_retries[4];
+       u32 num_tx_frames_failures[4];
+       u32 num_rts_fail;
+       u32 num_rts_success;
+       u32 num_rx_err;
+       u32 num_rx_discard;
+       u32 num_tx_not_acked;
+       u32 tx_rate_history[10];
+       u32 beacon_rssi_history[10];
+};
+
 struct ath10k_fw_stats_pdev {
        struct list_head list;
 
@@ -220,6 +239,7 @@ struct ath10k_fw_stats_pdev {
 
 struct ath10k_fw_stats {
        struct list_head pdevs;
+       struct list_head vdevs;
        struct list_head peers;
 };
 
@@ -288,6 +308,7 @@ struct ath10k_vif {
        bool is_started;
        bool is_up;
        bool spectral_enabled;
+       bool ps;
        u32 aid;
        u8 bssid[ETH_ALEN];
 
@@ -413,6 +434,12 @@ enum ath10k_fw_features {
         */
        ATH10K_FW_FEATURE_WMI_10_2 = 4,
 
+       /* Some firmware revisions lack proper multi-interface client powersave
+        * implementation. Enabling PS could result in connection drops,
+        * traffic stalls, etc.
+        */
+       ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
+
        /* keep last */
        ATH10K_FW_FEATURE_COUNT,
 };
index d2281e5c2ffe2070039b43ca1d380860cd4f2294..301081db1ef60a9f7a68a9452d7155965520cc39 100644 (file)
@@ -243,6 +243,16 @@ static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
        }
 }
 
+static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
+{
+       struct ath10k_fw_stats_vdev *i, *tmp;
+
+       list_for_each_entry_safe(i, tmp, head, list) {
+               list_del(&i->list);
+               kfree(i);
+       }
+}
+
 static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
 {
        struct ath10k_fw_stats_peer *i, *tmp;
@@ -258,6 +268,7 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
        spin_lock_bh(&ar->data_lock);
        ar->debug.fw_stats_done = false;
        ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+       ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
        ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
        spin_unlock_bh(&ar->data_lock);
 }
@@ -273,14 +284,27 @@ static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
        return num;
 }
 
+static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head)
+{
+       struct ath10k_fw_stats_vdev *i;
+       size_t num = 0;
+
+       list_for_each_entry(i, head, list)
+               ++num;
+
+       return num;
+}
+
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ath10k_fw_stats stats = {};
        bool is_start, is_started, is_end;
        size_t num_peers;
+       size_t num_vdevs;
        int ret;
 
        INIT_LIST_HEAD(&stats.pdevs);
+       INIT_LIST_HEAD(&stats.vdevs);
        INIT_LIST_HEAD(&stats.peers);
 
        spin_lock_bh(&ar->data_lock);
@@ -308,6 +332,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
        }
 
        num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+       num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
        is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
                    !list_empty(&stats.pdevs));
        is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
@@ -330,7 +355,13 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
                        goto free;
                }
 
+               if (num_vdevs >= BITS_PER_LONG) {
+                       ath10k_warn(ar, "dropping fw vdev stats\n");
+                       goto free;
+               }
+
                list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
+               list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
        }
 
        complete(&ar->debug.fw_stats_complete);
@@ -340,6 +371,7 @@ free:
         * resources if that is not the case.
         */
        ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
+       ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
        ath10k_debug_fw_stats_peers_free(&stats.peers);
 
 unlock:
@@ -363,7 +395,10 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
 
                reinit_completion(&ar->debug.fw_stats_complete);
 
-               ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
+               ret = ath10k_wmi_request_stats(ar,
+                                              WMI_STAT_PDEV |
+                                              WMI_STAT_VDEV |
+                                              WMI_STAT_PEER);
                if (ret) {
                        ath10k_warn(ar, "could not request stats (%d)\n", ret);
                        return ret;
@@ -395,8 +430,11 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
        unsigned int len = 0;
        unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
        const struct ath10k_fw_stats_pdev *pdev;
+       const struct ath10k_fw_stats_vdev *vdev;
        const struct ath10k_fw_stats_peer *peer;
        size_t num_peers;
+       size_t num_vdevs;
+       int i;
 
        spin_lock_bh(&ar->data_lock);
 
@@ -408,6 +446,7 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
        }
 
        num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
+       num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs);
 
        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -529,6 +568,65 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
 
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+                        "ath10k VDEV stats", num_vdevs);
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "vdev id", vdev->vdev_id);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "beacon snr", vdev->beacon_snr);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "data snr", vdev->data_snr);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num rx frames", vdev->num_rx_frames);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num rts fail", vdev->num_rts_fail);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num rts success", vdev->num_rts_success);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num rx err", vdev->num_rx_err);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num rx discard", vdev->num_rx_discard);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num tx not acked", vdev->num_tx_not_acked);
+
+               for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+                       len += scnprintf(buf + len, buf_len - len,
+                                       "%25s [%02d] %u\n",
+                                        "num tx frames", i,
+                                        vdev->num_tx_frames[i]);
+
+               for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+                       len += scnprintf(buf + len, buf_len - len,
+                                       "%25s [%02d] %u\n",
+                                        "num tx frames retries", i,
+                                        vdev->num_tx_frames_retries[i]);
+
+               for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+                       len += scnprintf(buf + len, buf_len - len,
+                                       "%25s [%02d] %u\n",
+                                        "num tx frames failures", i,
+                                        vdev->num_tx_frames_failures[i]);
+
+               for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+                       len += scnprintf(buf + len, buf_len - len,
+                                       "%25s [%02d] 0x%08x\n",
+                                        "tx rate history", i,
+                                        vdev->tx_rate_history[i]);
+
+               for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+                       len += scnprintf(buf + len, buf_len - len,
+                                       "%25s [%02d] %u\n",
+                                        "beacon rssi history", i,
+                                        vdev->beacon_rssi_history[i]);
+
+               len += scnprintf(buf + len, buf_len - len, "\n");
+       }
+
        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
                         "ath10k PEER stats", num_peers);
@@ -1900,6 +1998,7 @@ int ath10k_debug_create(struct ath10k *ar)
                return -ENOMEM;
 
        INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+       INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
        INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
 
        return 0;
index c1da44f65a4d0230bb2aac9461be7ca10a8ceb5c..01a2b384f358355ded1207323dd2f499b9e2a3c3 100644 (file)
@@ -176,7 +176,7 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
         * automatically balances load wrt to CPU power.
         *
         * This probably comes at a cost of lower maximum throughput but
-        * improves the avarage and stability. */
+        * improves the average and stability. */
        spin_lock_bh(&htt->rx_ring.lock);
        num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
index d6d2f0f00caad18ec00ba69f5635c79f161b2ba7..6fd7189b7b01ab50db86b42dfc2b75bb8bcec0de 100644 (file)
@@ -611,7 +611,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
-               ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i: %d\n",
+               ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
                            vdev_id, ret);
                return ret;
        }
@@ -658,7 +658,7 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar)
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret)
-               ath10k_warn(ar, "failed to synchronise monitor vdev %i: %d\n",
+               ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
                            ar->monitor_vdev_id, ret);
 
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
@@ -927,8 +927,9 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
-               ath10k_warn(ar, "failed to synchronise setup for vdev %i: %d\n",
-                           arg.vdev_id, ret);
+               ath10k_warn(ar,
+                           "failed to synchronize setup for vdev %i restart %d: %d\n",
+                           arg.vdev_id, restart, ret);
                return ret;
        }
 
@@ -966,7 +967,7 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
-               ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
+               ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
@@ -1182,7 +1183,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
                if (is_zero_ether_addr(arvif->bssid))
                        return;
 
-               memset(arvif->bssid, 0, ETH_ALEN);
+               eth_zero_addr(arvif->bssid);
 
                return;
        }
@@ -1253,6 +1254,20 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
        return 0;
 }
 
+static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+{
+       struct ath10k_vif *arvif;
+       int num = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       list_for_each_entry(arvif, &ar->arvifs, list)
+               if (arvif->ps)
+                       num++;
+
+       return num;
+}
+
 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 {
        struct ath10k *ar = arvif->ar;
@@ -1262,13 +1277,24 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
        enum wmi_sta_ps_mode psmode;
        int ret;
        int ps_timeout;
+       bool enable_ps;
 
        lockdep_assert_held(&arvif->ar->conf_mutex);
 
        if (arvif->vif->type != NL80211_IFTYPE_STATION)
                return 0;
 
-       if (vif->bss_conf.ps) {
+       enable_ps = arvif->ps;
+
+       if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
+           !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
+                     ar->fw_features)) {
+               ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
+                           arvif->vdev_id);
+               enable_ps = false;
+       }
+
+       if (enable_ps) {
                psmode = WMI_STA_PS_MODE_ENABLED;
                param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
 
@@ -1386,7 +1412,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
        lockdep_assert_held(&ar->conf_mutex);
 
        bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
-                              info->bssid, NULL, 0, 0, 0);
+                              info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY,
+                              IEEE80211_PRIVACY_ANY);
        if (bss) {
                const struct cfg80211_bss_ies *ies;
 
@@ -1781,6 +1808,68 @@ static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
                                         ath10k_smps_map[smps]);
 }
 
+static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
+                                     struct ieee80211_vif *vif,
+                                     struct ieee80211_sta_vht_cap vht_cap)
+{
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       int ret;
+       u32 param;
+       u32 value;
+
+       if (!(ar->vht_cap_info &
+             (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+              IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+              IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+              IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+               return 0;
+
+       param = ar->wmi.vdev_param->txbf;
+       value = 0;
+
+       if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
+               return 0;
+
+       /* The following logic is correct. If a remote STA advertises support
+        * for being a beamformer then we should enable us being a beamformee.
+        */
+
+       if (ar->vht_cap_info &
+           (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+            IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+               if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+                       value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+               if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+                       value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+       }
+
+       if (ar->vht_cap_info &
+           (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+            IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+               if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+                       value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+               if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+                       value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+       }
+
+       if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
+               value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+       if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
+               value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
+       if (ret) {
+               ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
+                           value, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 /* can be called only in mac80211 callbacks due to `key_count` usage */
 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
@@ -1789,6 +1878,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        struct ieee80211_sta_ht_cap ht_cap;
+       struct ieee80211_sta_vht_cap vht_cap;
        struct wmi_peer_assoc_complete_arg peer_arg;
        struct ieee80211_sta *ap_sta;
        int ret;
@@ -1811,6 +1901,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
        /* ap_sta must be accessed only within rcu section which must be left
         * before calling ath10k_setup_peer_smps() which might sleep. */
        ht_cap = ap_sta->ht_cap;
+       vht_cap = ap_sta->vht_cap;
 
        ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
        if (ret) {
@@ -1836,6 +1927,13 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
                return;
        }
 
+       ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+       if (ret) {
+               ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
+                           arvif->vdev_id, bss_conf->bssid, ret);
+               return;
+       }
+
        ath10k_dbg(ar, ATH10K_DBG_MAC,
                   "mac vdev %d up (associated) bssid %pM aid %d\n",
                   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
@@ -1853,6 +1951,18 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
        }
 
        arvif->is_up = true;
+
+       /* Workaround: Some firmware revisions (tested with qca6174
+        * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
+        * poked with peer param command.
+        */
+       ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
+                                       WMI_PEER_DUMMY_VAR, 1);
+       if (ret) {
+               ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
+                           arvif->bssid, arvif->vdev_id, ret);
+               return;
+       }
 }
 
 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
@@ -1860,6 +1970,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
 {
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ieee80211_sta_vht_cap vht_cap = {};
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
@@ -1874,6 +1985,13 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
 
        arvif->def_wep_key_idx = -1;
 
+       ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+       if (ret) {
+               ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return;
+       }
+
        arvif->is_up = false;
 }
 
@@ -2554,6 +2672,17 @@ static int ath10k_start_scan(struct ath10k *ar,
                return -ETIMEDOUT;
        }
 
+       /* If we failed to start the scan, return error code at
+        * this point.  This is probably due to some issue in the
+        * firmware, but no need to wedge the driver due to that...
+        */
+       spin_lock_bh(&ar->data_lock);
+       if (ar->scan.state == ATH10K_SCAN_IDLE) {
+               spin_unlock_bh(&ar->data_lock);
+               return -EINVAL;
+       }
+       spin_unlock_bh(&ar->data_lock);
+
        /* Add a 200ms margin to account for event/command processing */
        ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
                                     msecs_to_jiffies(arg->max_scan_time+200));
@@ -3323,9 +3452,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
        list_del(&arvif->list);
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
-               ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
+               ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
+                                            vif->addr);
                if (ret)
-                       ath10k_warn(ar, "failed to remove peer for AP vdev %i: %d\n",
+                       ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
                                    arvif->vdev_id, ret);
 
                kfree(arvif->u.ap.noa_data);
@@ -3339,6 +3469,21 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
                ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
                            arvif->vdev_id, ret);
 
+       /* Some firmware revisions don't notify host about self-peer removal
+        * until after associated vdev is deleted.
+        */
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+               ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
+                                                  vif->addr);
+               if (ret)
+                       ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
+                                   arvif->vdev_id, ret);
+
+               spin_lock_bh(&ar->data_lock);
+               ar->num_peers--;
+               spin_unlock_bh(&ar->data_lock);
+       }
+
        ath10k_peer_cleanup(ar, arvif->vdev_id);
 
        mutex_unlock(&ar->conf_mutex);
@@ -3534,7 +3679,9 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
        }
 
        if (changed & BSS_CHANGED_PS) {
-               ret = ath10k_mac_vif_setup_ps(arvif);
+               arvif->ps = vif->bss_conf.ps;
+
+               ret = ath10k_config_ps(ar);
                if (ret)
                        ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
                                    arvif->vdev_id, ret);
index e6972b09333ebe37638a586213c46f1e31d11d56..7681237fe298a4d430ec4d8ac95fd5fad2cc9de2 100644 (file)
@@ -104,7 +104,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
-               .src_sz_max = 512,
+               .src_sz_max = 2048,
                .dest_nentries = 512,
        },
 
@@ -174,7 +174,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
                .pipenum = __cpu_to_le32(1),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
-               .nbytes_max = __cpu_to_le32(512),
+               .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },
index 04dc4b9db04e70de19e6b2a0ce13db772225f0d3..c8b64e7a6089c2ba2f874cb5aad3007c5c04f296 100644 (file)
@@ -110,8 +110,7 @@ struct wmi_ops {
                                          bool deliver_cab);
        struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
                                            const struct wmi_wmm_params_all_arg *arg);
-       struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
-                                            enum wmi_stats_id stats_id);
+       struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
        struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
                                             enum wmi_force_fw_hang_type type,
                                             u32 delay_ms);
@@ -816,14 +815,14 @@ ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
 }
 
 static inline int
-ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
 {
        struct sk_buff *skb;
 
        if (!ar->wmi.ops->gen_request_stats)
                return -EOPNOTSUPP;
 
-       skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
+       skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
index 71614ba1b145e590bd26a4cb8112237584248a11..ee0c5f602e297424b3f5eb143cdda542b1231291 100644 (file)
@@ -869,16 +869,57 @@ static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
        return 0;
 }
 
+static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
+                                          struct ath10k_fw_stats_vdev *dst)
+{
+       int i;
+
+       dst->vdev_id = __le32_to_cpu(src->vdev_id);
+       dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
+       dst->data_snr = __le32_to_cpu(src->data_snr);
+       dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
+       dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
+       dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
+       dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
+       dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
+       dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
+
+       for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+               dst->num_tx_frames[i] =
+                       __le32_to_cpu(src->num_tx_frames[i]);
+
+       for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+               dst->num_tx_frames_retries[i] =
+                       __le32_to_cpu(src->num_tx_frames_retries[i]);
+
+       for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+               dst->num_tx_frames_failures[i] =
+                       __le32_to_cpu(src->num_tx_frames_failures[i]);
+
+       for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+               dst->tx_rate_history[i] =
+                       __le32_to_cpu(src->tx_rate_history[i]);
+
+       for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+               dst->beacon_rssi_history[i] =
+                       __le32_to_cpu(src->beacon_rssi_history[i]);
+}
+
 static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
                                           struct sk_buff *skb,
                                           struct ath10k_fw_stats *stats)
 {
        const void **tb;
-       const struct wmi_stats_event *ev;
+       const struct wmi_tlv_stats_ev *ev;
        const void *data;
-       u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+       u32 num_pdev_stats;
+       u32 num_vdev_stats;
+       u32 num_peer_stats;
+       u32 num_bcnflt_stats;
+       u32 num_chan_stats;
        size_t data_len;
        int ret;
+       int i;
 
        tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
        if (IS_ERR(tb)) {
@@ -899,8 +940,73 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
        num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
        num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
        num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+       num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+       num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
+                  num_pdev_stats, num_vdev_stats, num_peer_stats,
+                  num_bcnflt_stats, num_chan_stats);
+
+       for (i = 0; i < num_pdev_stats; i++) {
+               const struct wmi_pdev_stats *src;
+               struct ath10k_fw_stats_pdev *dst;
+
+               src = data;
+               if (data_len < sizeof(*src))
+                       return -EPROTO;
+
+               data += sizeof(*src);
+               data_len -= sizeof(*src);
+
+               dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+               if (!dst)
+                       continue;
+
+               ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+               ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+               ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+               list_add_tail(&dst->list, &stats->pdevs);
+       }
+
+       for (i = 0; i < num_vdev_stats; i++) {
+               const struct wmi_tlv_vdev_stats *src;
+               struct ath10k_fw_stats_vdev *dst;
+
+               src = data;
+               if (data_len < sizeof(*src))
+                       return -EPROTO;
+
+               data += sizeof(*src);
+               data_len -= sizeof(*src);
+
+               dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+               if (!dst)
+                       continue;
 
-       WARN_ON(1); /* FIXME: not implemented yet */
+               ath10k_wmi_tlv_pull_vdev_stats(src, dst);
+               list_add_tail(&dst->list, &stats->vdevs);
+       }
+
+       for (i = 0; i < num_peer_stats; i++) {
+               const struct wmi_10x_peer_stats *src;
+               struct ath10k_fw_stats_peer *dst;
+
+               src = data;
+               if (data_len < sizeof(*src))
+                       return -EPROTO;
+
+               data += sizeof(*src);
+               data_len -= sizeof(*src);
+
+               dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+               if (!dst)
+                       continue;
+
+               ath10k_wmi_pull_peer_stats(&src->old, dst);
+               dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+               list_add_tail(&dst->list, &stats->peers);
+       }
 
        kfree(tb);
        return 0;
@@ -1604,14 +1710,12 @@ ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
                                    const struct wmi_wmm_params_all_arg *arg)
 {
        struct wmi_tlv_vdev_set_wmm_cmd *cmd;
-       struct wmi_wmm_params *wmm;
        struct wmi_tlv *tlv;
        struct sk_buff *skb;
        size_t len;
        void *ptr;
 
-       len = (sizeof(*tlv) + sizeof(*cmd)) +
-             (4 * (sizeof(*tlv) + sizeof(*wmm)));
+       len = sizeof(*tlv) + sizeof(*cmd);
        skb = ath10k_wmi_alloc_skb(ar, len);
        if (!skb)
                return ERR_PTR(-ENOMEM);
@@ -1623,13 +1727,10 @@ ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
        cmd = (void *)tlv->value;
        cmd->vdev_id = __cpu_to_le32(vdev_id);
 
-       ptr += sizeof(*tlv);
-       ptr += sizeof(*cmd);
-
-       ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
-       ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
-       ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
-       ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
+       ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
+       ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
+       ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
+       ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
 
        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
        return skb;
@@ -2080,8 +2181,7 @@ ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
 }
 
 static struct sk_buff *
-ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar,
-                                   enum wmi_stats_id stats_id)
+ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
 {
        struct wmi_request_stats_cmd *cmd;
        struct wmi_tlv *tlv;
@@ -2095,7 +2195,7 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar,
        tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
        tlv->len = __cpu_to_le16(sizeof(*cmd));
        cmd = (void *)tlv->value;
-       cmd->stats_id = __cpu_to_le32(stats_id);
+       cmd->stats_id = __cpu_to_le32(stats_mask);
 
        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
        return skb;
index de68fe76eae6eea583b048a7293a01858d6bc4c4..a6c8280cc4b194384c1abee08e8d379bd4754e23 100644 (file)
@@ -1302,8 +1302,14 @@ struct wmi_tlv_pdev_set_wmm_cmd {
        __le32 dg_type; /* no idea.. */
 } __packed;
 
+struct wmi_tlv_vdev_wmm_params {
+       __le32 dummy;
+       struct wmi_wmm_params params;
+} __packed;
+
 struct wmi_tlv_vdev_set_wmm_cmd {
        __le32 vdev_id;
+       struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4];
 } __packed;
 
 struct wmi_tlv_phyerr_ev {
@@ -1439,6 +1445,15 @@ struct wmi_tlv_sta_keepalive_cmd {
        __le32 interval; /* in seconds */
 } __packed;
 
+struct wmi_tlv_stats_ev {
+       __le32 stats_id; /* WMI_STAT_ */
+       __le32 num_pdev_stats;
+       __le32 num_vdev_stats;
+       __le32 num_peer_stats;
+       __le32 num_bcnflt_stats;
+       __le32 num_chan_stats;
+} __packed;
+
 void ath10k_wmi_tlv_attach(struct ath10k *ar);
 
 #endif
index aeea1c7939434d561e61fb9eed4784dafa5ffacb..c7ea77edce245ccd389111ad3ac9fd3958d4bc89 100644 (file)
@@ -1125,6 +1125,25 @@ static void ath10k_wmi_event_scan_started(struct ath10k *ar)
        }
 }
 
+static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
+{
+       lockdep_assert_held(&ar->data_lock);
+
+       switch (ar->scan.state) {
+       case ATH10K_SCAN_IDLE:
+       case ATH10K_SCAN_RUNNING:
+       case ATH10K_SCAN_ABORTING:
+               ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
+                           ath10k_scan_state_str(ar->scan.state),
+                           ar->scan.state);
+               break;
+       case ATH10K_SCAN_STARTING:
+               complete(&ar->scan.started);
+               __ath10k_scan_finish(ar);
+               break;
+       }
+}
+
 static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
 {
        lockdep_assert_held(&ar->data_lock);
@@ -1292,6 +1311,7 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_SCAN_EVENT_START_FAILED:
                ath10k_warn(ar, "received scan start failure event\n");
+               ath10k_wmi_event_scan_start_failed(ar);
                break;
        case WMI_SCAN_EVENT_DEQUEUED:
        case WMI_SCAN_EVENT_PREEMPTED:
@@ -4954,7 +4974,7 @@ ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
 }
 
 static struct sk_buff *
-ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
 {
        struct wmi_request_stats_cmd *cmd;
        struct sk_buff *skb;
@@ -4964,9 +4984,10 @@ ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
                return ERR_PTR(-ENOMEM);
 
        cmd = (struct wmi_request_stats_cmd *)skb->data;
-       cmd->stats_id = __cpu_to_le32(stats_id);
+       cmd->stats_id = __cpu_to_le32(stats_mask);
 
-       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
+                  stats_mask);
        return skb;
 }
 
index 20ce3603e64b73374aeb1ce452a930092189e974..adf935bf0580f488708688c4728aaab1f7325dc5 100644 (file)
@@ -3057,8 +3057,12 @@ struct wmi_pdev_stats_peer {
 } __packed;
 
 enum wmi_stats_id {
-       WMI_REQUEST_PEER_STAT   = 0x01,
-       WMI_REQUEST_AP_STAT     = 0x02
+       WMI_STAT_PEER = BIT(0),
+       WMI_STAT_AP = BIT(1),
+       WMI_STAT_PDEV = BIT(2),
+       WMI_STAT_VDEV = BIT(3),
+       WMI_STAT_BCNFLT = BIT(4),
+       WMI_STAT_VDEV_RATE = BIT(5),
 };
 
 struct wlan_inst_rssi_args {
@@ -3093,7 +3097,7 @@ struct wmi_pdev_suspend_cmd {
 } __packed;
 
 struct wmi_stats_event {
-       __le32 stats_id; /* %WMI_REQUEST_ */
+       __le32 stats_id; /* WMI_STAT_ */
        /*
         * number of pdev stats event structures
         * (wmi_pdev_stats) 0 or 1
@@ -3745,6 +3749,11 @@ enum wmi_10x_vdev_param {
        WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
 };
 
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
 /* slot time long */
 #define WMI_VDEV_SLOT_TIME_LONG                0x1
 /* slot time short */
@@ -4436,7 +4445,8 @@ enum wmi_peer_param {
        WMI_PEER_AUTHORIZE  = 0x3,
        WMI_PEER_CHAN_WIDTH = 0x4,
        WMI_PEER_NSS        = 0x5,
-       WMI_PEER_USE_4ADDR  = 0x6
+       WMI_PEER_USE_4ADDR  = 0x6,
+       WMI_PEER_DUMMY_VAR  = 0xff, /* dummy parameter for STA PS workaround */
 };
 
 struct wmi_peer_set_param_cmd {
index 1ed7a88aeea9cd7ba7890fd308a1952888cfd28c..7ca0d6f930fd2e6e11021ac8a5d09ef002e80fc5 100644 (file)
@@ -1283,6 +1283,7 @@ struct ath5k_hw {
 #define ATH_STAT_PROMISC       1
 #define ATH_STAT_LEDSOFT       2               /* enable LED gpio status */
 #define ATH_STAT_STARTED       3               /* opened & irqs enabled */
+#define ATH_STAT_RESET         4               /* hw reset */
 
        unsigned int            filter_flags;   /* HW flags, AR5K_RX_FILTER_* */
        unsigned int            fif_filter_flags; /* Current FIF_* filter flags */
index bc9cb356fa697fd67efcb186f7e28e890bbe3287..a6131825c9f6eb82c2f2a75bb4c00b4279b6dea4 100644 (file)
@@ -528,7 +528,7 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
         * together with the BSSID mask when matching addresses.
         */
        iter_data.hw_macaddr = common->macaddr;
-       memset(&iter_data.mask, 0xff, ETH_ALEN);
+       eth_broadcast_addr(iter_data.mask);
        iter_data.found_active = false;
        iter_data.need_set_hw_addr = true;
        iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
@@ -1523,6 +1523,9 @@ ath5k_set_current_imask(struct ath5k_hw *ah)
        enum ath5k_int imask;
        unsigned long flags;
 
+       if (test_bit(ATH_STAT_RESET, ah->status))
+               return;
+
        spin_lock_irqsave(&ah->irqlock, flags);
        imask = ah->imask;
        if (ah->rx_pending)
@@ -2858,10 +2861,12 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
 {
        struct ath_common *common = ath5k_hw_common(ah);
        int ret, ani_mode;
-       bool fast;
+       bool fast = chan && modparam_fastchanswitch ? 1 : 0;
 
        ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");
 
+       __set_bit(ATH_STAT_RESET, ah->status);
+
        ath5k_hw_set_imr(ah, 0);
        synchronize_irq(ah->irq);
        ath5k_stop_tasklets(ah);
@@ -2876,11 +2881,29 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
         * so we should also free any remaining
         * tx buffers */
        ath5k_drain_tx_buffs(ah);
+
+       /* Stop PCU */
+       ath5k_hw_stop_rx_pcu(ah);
+
+       /* Stop DMA
+        *
+        * Note: If DMA didn't stop continue
+        * since only a reset will fix it.
+        */
+       ret = ath5k_hw_dma_stop(ah);
+
+       /* RF Bus grant won't work if we have pending
+        * frames
+        */
+       if (ret && fast) {
+               ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+                         "DMA didn't stop, falling back to normal reset\n");
+               fast = false;
+       }
+
        if (chan)
                ah->curchan = chan;
 
-       fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
-
        ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
        if (ret) {
                ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
@@ -2934,6 +2957,8 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
         */
 /*     ath5k_chan_change(ah, c); */
 
+       __clear_bit(ATH_STAT_RESET, ah->status);
+
        ath5k_beacon_config(ah);
        /* intrs are enabled by ath5k_beacon_config */
 
index b9b651ea985156d0cd5dcbb814d75b927bf7d238..99e62f99a182db5c205a7dca60492b4e312f6f84 100644 (file)
@@ -1169,30 +1169,6 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
        if (ah->ah_version == AR5K_AR5212)
                ath5k_hw_set_sleep_clock(ah, false);
 
-       /*
-        * Stop PCU
-        */
-       ath5k_hw_stop_rx_pcu(ah);
-
-       /*
-        * Stop DMA
-        *
-        * Note: If DMA didn't stop continue
-        * since only a reset will fix it.
-        */
-       ret = ath5k_hw_dma_stop(ah);
-
-       /* RF Bus grant won't work if we have pending
-        * frames */
-       if (ret && fast) {
-               ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
-                       "DMA didn't stop, falling back to normal reset\n");
-               fast = false;
-               /* Non fatal, just continue with
-                * normal reset */
-               ret = 0;
-       }
-
        mode = channel->hw_value;
        switch (mode) {
        case AR5K_MODE_11A:
index 85da63a67faf56f35f92615f3bae5cda67504c4d..cce4625a53ad7eb630bef4b717cc9fe0177d96fc 100644 (file)
@@ -686,20 +686,21 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
 {
        struct ath6kl *ar = vif->ar;
        struct cfg80211_bss *bss;
-       u16 cap_mask, cap_val;
+       u16 cap_val;
+       enum ieee80211_bss_type bss_type;
        u8 *ie;
 
        if (nw_type & ADHOC_NETWORK) {
-               cap_mask = WLAN_CAPABILITY_IBSS;
                cap_val = WLAN_CAPABILITY_IBSS;
+               bss_type = IEEE80211_BSS_TYPE_IBSS;
        } else {
-               cap_mask = WLAN_CAPABILITY_ESS;
                cap_val = WLAN_CAPABILITY_ESS;
+               bss_type = IEEE80211_BSS_TYPE_ESS;
        }
 
        bss = cfg80211_get_bss(ar->wiphy, chan, bssid,
                               vif->ssid, vif->ssid_len,
-                              cap_mask, cap_val);
+                              bss_type, IEEE80211_PRIVACY_ANY);
        if (bss == NULL) {
                /*
                 * Since cfg80211 may not yet know about the BSS,
@@ -1495,6 +1496,7 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
 
 static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
                                                      const char *name,
+                                                     unsigned char name_assign_type,
                                                      enum nl80211_iftype type,
                                                      u32 *flags,
                                                      struct vif_params *params)
@@ -1513,7 +1515,7 @@ static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
                return ERR_PTR(-EINVAL);
        }
 
-       wdev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
+       wdev = ath6kl_interface_add(ar, name, name_assign_type, type, if_idx, nw_type);
        if (!wdev)
                return ERR_PTR(-ENOMEM);
 
@@ -2033,7 +2035,7 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
        int ret;
 
        /* Setup unicast pkt pattern */
-       memset(mac_mask, 0xff, ETH_ALEN);
+       eth_broadcast_addr(mac_mask);
        ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
                                vif->fw_vif_idx, WOW_LIST_ID,
                                ETH_ALEN, 0, ndev->dev_addr,
@@ -3633,13 +3635,14 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
 }
 
 struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
+                                         unsigned char name_assign_type,
                                          enum nl80211_iftype type,
                                          u8 fw_vif_idx, u8 nw_type)
 {
        struct net_device *ndev;
        struct ath6kl_vif *vif;
 
-       ndev = alloc_netdev(sizeof(*vif), name, NET_NAME_UNKNOWN, ether_setup);
+       ndev = alloc_netdev(sizeof(*vif), name, name_assign_type, ether_setup);
        if (!ndev)
                return NULL;
 
index b59becd91aeaf9474728902a13db8bb899a0dc2a..5aa57a7639bfe3fd76e3ca888f78d7ec96036c80 100644 (file)
@@ -25,6 +25,7 @@ enum ath6kl_cfg_suspend_mode {
 };
 
 struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
+                                         unsigned char name_assign_type,
                                          enum nl80211_iftype type,
                                          u8 fw_vif_idx, u8 nw_type);
 void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
index 0df74b245af4c0180ea9219c8765453241aea9f0..4ec02cea0f430830a7f063c0baff3b5baeed9fef 100644 (file)
@@ -211,8 +211,8 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
        rtnl_lock();
 
        /* Add an initial station interface */
-       wdev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
-                                   INFRA_NETWORK);
+       wdev = ath6kl_interface_add(ar, "wlan%d", NET_NAME_ENUM,
+                                   NL80211_IFTYPE_STATION, 0, INFRA_NETWORK);
 
        rtnl_unlock();
 
index b42ba46b50307d09e9972150297e7df0a7eb5c8e..1af3fed5a72caa203e9cbda00f677905ef966863 100644 (file)
@@ -105,7 +105,7 @@ static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
 
        memset(&ar->ap_stats.sta[sta->aid - 1], 0,
               sizeof(struct wmi_per_sta_stat));
-       memset(sta->mac, 0, ETH_ALEN);
+       eth_zero_addr(sta->mac);
        memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
        sta->aid = 0;
        sta->sta_flags = 0;
index 473972288a84a901a69cc555802b2d8228e274f7..ecda613c2d547d4278ac0665c8c0eeaaf594c66a 100644 (file)
@@ -46,7 +46,8 @@ ath9k_hw-y:=  \
 ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
 
 ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
-                                          ar9003_mci.o
+                                          ar9003_mci.o \
+                                          ar9003_aic.o
 
 ath9k_hw-$(CONFIG_ATH9K_PCOEM) += ar9003_rtt.o
 
index ca01d17d130fec62067e2e2f9da78f836e2fa643..25e45e4d1a605ba69092f9c76af4dcf3ebfae056 100644 (file)
@@ -107,11 +107,21 @@ static const struct ani_cck_level_entry cck_level_table[] = {
 static void ath9k_hw_update_mibstats(struct ath_hw *ah,
                                     struct ath9k_mib_stats *stats)
 {
-       stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL);
-       stats->rts_bad += REG_READ(ah, AR_RTS_FAIL);
-       stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL);
-       stats->rts_good += REG_READ(ah, AR_RTS_OK);
-       stats->beacons += REG_READ(ah, AR_BEACON_CNT);
+       u32 addr[5] = {AR_RTS_OK, AR_RTS_FAIL, AR_ACK_FAIL,
+                      AR_FCS_FAIL, AR_BEACON_CNT};
+       u32 data[5];
+
+       REG_READ_MULTI(ah, &addr[0], &data[0], 5);
+       /* AR_RTS_OK */
+       stats->rts_good += data[0];
+       /* AR_RTS_FAIL */
+       stats->rts_bad += data[1];
+       /* AR_ACK_FAIL */
+       stats->ackrcv_bad += data[2];
+       /* AR_FCS_FAIL */
+       stats->fcs_bad += data[3];
+       /* AR_BEACON_CNT */
+       stats->beacons += data[4];
 }
 
 static void ath9k_ani_restart(struct ath_hw *ah)
index f273427fdd29ff93a3406d582928499ae22398c7..6c23d279525f5dfa8671342b3c30b25163b6e0ba 100644 (file)
@@ -681,12 +681,13 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
                        phymode |= AR_PHY_FC_DYN2040_PRI_CH;
 
        }
+       ENABLE_REGWRITE_BUFFER(ah);
        REG_WRITE(ah, AR_PHY_TURBO, phymode);
 
+       /* This function do only REG_WRITE, so
+        * we can include it to REGWRITE_BUFFER. */
        ath9k_hw_set11nmac2040(ah, chan);
 
-       ENABLE_REGWRITE_BUFFER(ah);
-
        REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
        REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
 
index 42190b67c6719594bcbe57b9d70e02186645bace..50fcd343c41af12d865d55f1ceb5427797bd20d7 100644 (file)
@@ -430,46 +430,43 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
        u32 regVal;
        unsigned int i;
        u32 regList[][2] = {
-               { 0x786c, 0 },
-               { 0x7854, 0 },
-               { 0x7820, 0 },
-               { 0x7824, 0 },
-               { 0x7868, 0 },
-               { 0x783c, 0 },
-               { 0x7838, 0 } ,
-               { 0x7828, 0 } ,
+               { AR9285_AN_TOP3, 0 },
+               { AR9285_AN_RXTXBB1, 0 },
+               { AR9285_AN_RF2G1, 0 },
+               { AR9285_AN_RF2G2, 0 },
+               { AR9285_AN_TOP2, 0 },
+               { AR9285_AN_RF2G8, 0 },
+               { AR9285_AN_RF2G7, 0 },
+               { AR9285_AN_RF2G3, 0 },
        };
 
-       for (i = 0; i < ARRAY_SIZE(regList); i++)
-               regList[i][1] = REG_READ(ah, regList[i][0]);
-
-       regVal = REG_READ(ah, 0x7834);
-       regVal &= (~(0x1));
-       REG_WRITE(ah, 0x7834, regVal);
-       regVal = REG_READ(ah, 0x9808);
-       regVal |= (0x1 << 27);
-       REG_WRITE(ah, 0x9808, regVal);
+       REG_READ_ARRAY(ah, regList, ARRAY_SIZE(regList));
 
+       ENABLE_REG_RMW_BUFFER(ah);
+       /* 7834, b1=0 */
+       REG_CLR_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
+       /* 9808, b27=1 */
+       REG_SET_BIT(ah, 0x9808, 1 << 27);
        /* 786c,b23,1, pwddac=1 */
-       REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+       REG_SET_BIT(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC);
        /* 7854, b5,1, pdrxtxbb=1 */
-       REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+       REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1);
        /* 7854, b7,1, pdv2i=1 */
-       REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+       REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I);
        /* 7854, b8,1, pddacinterface=1 */
-       REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+       REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF);
        /* 7824,b12,0, offcal=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL);
        /* 7838, b1,0, pwddb=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB);
        /* 7820,b11,0, enpacal=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL);
        /* 7820,b25,1, pdpadrv1=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1);
        /* 7820,b24,0, pdpadrv2=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2);
        /* 7820,b23,0, pdpaout=0 */
-       REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+       REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT);
        /* 783c,b14-16,7, padrvgn2tab_0=7 */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
        /*
@@ -477,8 +474,9 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
         * does not matter since we turn it off
         */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
-
+       /* 7828, b0-11, ccom=fff */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
+       REG_RMW_BUFFER_FLUSH(ah);
 
        /* Set:
         * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
@@ -490,15 +488,16 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
 
        /* find off_6_1; */
        for (i = 6; i > 0; i--) {
-               regVal = REG_READ(ah, 0x7834);
+               regVal = REG_READ(ah, AR9285_AN_RF2G6);
                regVal |= (1 << (20 + i));
-               REG_WRITE(ah, 0x7834, regVal);
+               REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
                udelay(1);
                /* regVal = REG_READ(ah, 0x7834); */
                regVal &= (~(0x1 << (20 + i)));
-               regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
+               regVal |= (MS(REG_READ(ah, AR9285_AN_RF2G9),
+                             AR9285_AN_RXTXBB1_SPARE9)
                            << (20 + i));
-               REG_WRITE(ah, 0x7834, regVal);
+               REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
        }
 
        regVal = (regVal >> 20) & 0x7f;
@@ -515,15 +514,15 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
                ah->pacal_info.prev_offset = regVal;
        }
 
-       ENABLE_REGWRITE_BUFFER(ah);
 
-       regVal = REG_READ(ah, 0x7834);
-       regVal |= 0x1;
-       REG_WRITE(ah, 0x7834, regVal);
-       regVal = REG_READ(ah, 0x9808);
-       regVal &= (~(0x1 << 27));
-       REG_WRITE(ah, 0x9808, regVal);
+       ENABLE_REG_RMW_BUFFER(ah);
+       /* 7834, b1=1 */
+       REG_SET_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
+       /* 9808, b27=0 */
+       REG_CLR_BIT(ah, 0x9808, 1 << 27);
+       REG_RMW_BUFFER_FLUSH(ah);
 
+       ENABLE_REGWRITE_BUFFER(ah);
        for (i = 0; i < ARRAY_SIZE(regList); i++)
                REG_WRITE(ah, regList[i][0], regList[i][1]);
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
new file mode 100644 (file)
index 0000000..1db119d
--- /dev/null
@@ -0,0 +1,599 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_mci.h"
+#include "ar9003_aic.h"
+#include "ar9003_phy.h"
+#include "reg_aic.h"
+
+static const u8 com_att_db_table[ATH_AIC_MAX_COM_ATT_DB_TABLE] = {
+       0, 3, 9, 15, 21, 27
+};
+
+static const u16 aic_lin_table[ATH_AIC_MAX_AIC_LIN_TABLE] = {
+       8191, 7300, 6506, 5799, 5168, 4606, 4105, 3659,
+       3261, 2906, 2590, 2309, 2057, 1834, 1634, 1457,
+       1298, 1157, 1031, 919,  819,  730,  651,  580,
+       517,  461,  411,  366,  326,  291,  259,  231,
+       206,  183,  163,  146,  130,  116,  103,  92,
+       82,   73,   65,   58,   52,   46,   41,   37,
+       33,   29,   26,   23,   21,   18,   16,   15,
+       13,   12,   10,   9,    8,    7,    7,    6,
+       5,    5,    4,    4,    3
+};
+
+static bool ar9003_hw_is_aic_enabled(struct ath_hw *ah)
+{
+       struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+
+       /*
+        * Disable AIC for now, until we have all the
+        * HW code and the driver-layer support ready.
+        */
+       return false;
+
+       if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_AIC)
+               return false;
+
+       return true;
+}
+
+static int16_t ar9003_aic_find_valid(struct ath_aic_sram_info *cal_sram,
+                                    bool dir, u8 index)
+{
+       int16_t i;
+
+       if (dir) {
+               for (i = index + 1; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+                       if (cal_sram[i].valid)
+                               break;
+               }
+       } else {
+               for (i = index - 1; i >= 0; i--) {
+                       if (cal_sram[i].valid)
+                               break;
+               }
+       }
+
+       if ((i >= ATH_AIC_MAX_BT_CHANNEL) || (i < 0))
+               i = -1;
+
+       return i;
+}
+
+/*
+ * type 0: aic_lin_table, 1: com_att_db_table
+ */
+static int16_t ar9003_aic_find_index(u8 type, int16_t value)
+{
+       int16_t i = -1;
+
+       if (type == 0) {
+               for (i = ATH_AIC_MAX_AIC_LIN_TABLE - 1; i >= 0; i--) {
+                       if (aic_lin_table[i] >= value)
+                               break;
+               }
+       } else if (type == 1) {
+               for (i = 0; i < ATH_AIC_MAX_COM_ATT_DB_TABLE; i++) {
+                       if (com_att_db_table[i] > value) {
+                               i--;
+                               break;
+                       }
+               }
+
+               if (i >= ATH_AIC_MAX_COM_ATT_DB_TABLE)
+                       i = -1;
+       }
+
+       return i;
+}
+
+static void ar9003_aic_gain_table(struct ath_hw *ah)
+{
+       u32 aic_atten_word[19], i;
+
+       /* Config LNA gain difference */
+       REG_WRITE(ah, AR_PHY_BT_COEX_4, 0x2c200a00);
+       REG_WRITE(ah, AR_PHY_BT_COEX_5, 0x5c4e4438);
+
+       /* Program gain table */
+       aic_atten_word[0] = (0x1 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+               (0x1f & 0x1f); /* -01 dB: 4'd1, 5'd31,  00 dB: 4'd0, 5'd31 */
+       aic_atten_word[1] = (0x3 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+               (0x1f & 0x1f); /* -03 dB: 4'd3, 5'd31, -02 dB: 4'd2, 5'd31 */
+       aic_atten_word[2] = (0x5 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+               (0x1f & 0x1f); /* -05 dB: 4'd5, 5'd31, -04 dB: 4'd4, 5'd31 */
+       aic_atten_word[3] = (0x1 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+               (0x1e & 0x1f); /* -07 dB: 4'd1, 5'd30, -06 dB: 4'd0, 5'd30 */
+       aic_atten_word[4] = (0x3 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+               (0x1e & 0x1f); /* -09 dB: 4'd3, 5'd30, -08 dB: 4'd2, 5'd30 */
+       aic_atten_word[5] = (0x5 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+               (0x1e & 0x1f); /* -11 dB: 4'd5, 5'd30, -10 dB: 4'd4, 5'd30 */
+       aic_atten_word[6] = (0x1 & 0xf) << 14 | (0xf & 0x1f) << 9  | (0x0 & 0xf) << 5 |
+               (0xf & 0x1f);  /* -13 dB: 4'd1, 5'd15, -12 dB: 4'd0, 5'd15 */
+       aic_atten_word[7] = (0x3 & 0xf) << 14 | (0xf & 0x1f) << 9  | (0x2 & 0xf) << 5 |
+               (0xf & 0x1f);  /* -15 dB: 4'd3, 5'd15, -14 dB: 4'd2, 5'd15 */
+       aic_atten_word[8] = (0x5 & 0xf) << 14 | (0xf & 0x1f) << 9  | (0x4 & 0xf) << 5 |
+               (0xf & 0x1f);  /* -17 dB: 4'd5, 5'd15, -16 dB: 4'd4, 5'd15 */
+       aic_atten_word[9] = (0x1 & 0xf) << 14 | (0x7 & 0x1f) << 9  | (0x0 & 0xf) << 5 |
+               (0x7 & 0x1f);  /* -19 dB: 4'd1, 5'd07, -18 dB: 4'd0, 5'd07 */
+       aic_atten_word[10] = (0x3 & 0xf) << 14 | (0x7 & 0x1f) << 9  | (0x2 & 0xf) << 5 |
+               (0x7 & 0x1f);  /* -21 dB: 4'd3, 5'd07, -20 dB: 4'd2, 5'd07 */
+       aic_atten_word[11] = (0x5 & 0xf) << 14 | (0x7 & 0x1f) << 9  | (0x4 & 0xf) << 5 |
+               (0x7 & 0x1f);  /* -23 dB: 4'd5, 5'd07, -22 dB: 4'd4, 5'd07 */
+       aic_atten_word[12] = (0x7 & 0xf) << 14 | (0x7 & 0x1f) << 9  | (0x6 & 0xf) << 5 |
+               (0x7 & 0x1f);  /* -25 dB: 4'd7, 5'd07, -24 dB: 4'd6, 5'd07 */
+       aic_atten_word[13] = (0x3 & 0xf) << 14 | (0x3 & 0x1f) << 9  | (0x2 & 0xf) << 5 |
+               (0x3 & 0x1f);  /* -27 dB: 4'd3, 5'd03, -26 dB: 4'd2, 5'd03 */
+       aic_atten_word[14] = (0x5 & 0xf) << 14 | (0x3 & 0x1f) << 9  | (0x4 & 0xf) << 5 |
+               (0x3 & 0x1f);  /* -29 dB: 4'd5, 5'd03, -28 dB: 4'd4, 5'd03 */
+       aic_atten_word[15] = (0x1 & 0xf) << 14 | (0x1 & 0x1f) << 9  | (0x0 & 0xf) << 5 |
+               (0x1 & 0x1f);  /* -31 dB: 4'd1, 5'd01, -30 dB: 4'd0, 5'd01 */
+       aic_atten_word[16] = (0x3 & 0xf) << 14 | (0x1 & 0x1f) << 9  | (0x2 & 0xf) << 5 |
+               (0x1 & 0x1f);  /* -33 dB: 4'd3, 5'd01, -32 dB: 4'd2, 5'd01 */
+       aic_atten_word[17] = (0x5 & 0xf) << 14 | (0x1 & 0x1f) << 9  | (0x4 & 0xf) << 5 |
+               (0x1 & 0x1f);  /* -35 dB: 4'd5, 5'd01, -34 dB: 4'd4, 5'd01 */
+       aic_atten_word[18] = (0x7 & 0xf) << 14 | (0x1 & 0x1f) << 9  | (0x6 & 0xf) << 5 |
+               (0x1 & 0x1f);  /* -37 dB: 4'd7, 5'd01, -36 dB: 4'd6, 5'd01 */
+
+       /* Write to Gain table with auto increment enabled. */
+       REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
+                 (ATH_AIC_SRAM_AUTO_INCREMENT |
+                  ATH_AIC_SRAM_GAIN_TABLE_OFFSET));
+
+       for (i = 0; i < 19; i++) {
+               REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000),
+                         aic_atten_word[i]);
+       }
+}
+
+static u8 ar9003_aic_cal_start(struct ath_hw *ah, u8 min_valid_count)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+       int i;
+
+       /* Write to Gain table with auto increment enabled. */
+       REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
+                 (ATH_AIC_SRAM_AUTO_INCREMENT |
+                  ATH_AIC_SRAM_CAL_OFFSET));
+
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000), 0);
+               aic->aic_sram[i] = 0;
+       }
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B0,
+                 (SM(0, AR_PHY_AIC_MON_ENABLE) |
+                  SM(127, AR_PHY_AIC_CAL_MAX_HOP_COUNT) |
+                  SM(min_valid_count, AR_PHY_AIC_CAL_MIN_VALID_COUNT) |
+                  SM(37, AR_PHY_AIC_F_WLAN) |
+                  SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
+                  SM(0, AR_PHY_AIC_CAL_ENABLE) |
+                  SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
+                  SM(0, AR_PHY_AIC_ENABLE)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B1,
+                 (SM(0, AR_PHY_AIC_MON_ENABLE) |
+                  SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
+                  SM(0, AR_PHY_AIC_CAL_ENABLE) |
+                  SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
+                  SM(0, AR_PHY_AIC_ENABLE)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B0,
+                 (SM(8, AR_PHY_AIC_CAL_BT_REF_DELAY) |
+                  SM(0, AR_PHY_AIC_BT_IDLE_CFG) |
+                  SM(1, AR_PHY_AIC_STDBY_COND) |
+                  SM(37, AR_PHY_AIC_STDBY_ROT_ATT_DB) |
+                  SM(5, AR_PHY_AIC_STDBY_COM_ATT_DB) |
+                  SM(15, AR_PHY_AIC_RSSI_MAX) |
+                  SM(0, AR_PHY_AIC_RSSI_MIN)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B1,
+                 (SM(15, AR_PHY_AIC_RSSI_MAX) |
+                  SM(0, AR_PHY_AIC_RSSI_MIN)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_2_B0,
+                 (SM(44, AR_PHY_AIC_RADIO_DELAY) |
+                  SM(8, AR_PHY_AIC_CAL_STEP_SIZE_CORR) |
+                  SM(12, AR_PHY_AIC_CAL_ROT_IDX_CORR) |
+                  SM(2, AR_PHY_AIC_CAL_CONV_CHECK_FACTOR) |
+                  SM(5, AR_PHY_AIC_ROT_IDX_COUNT_MAX) |
+                  SM(0, AR_PHY_AIC_CAL_SYNTH_TOGGLE) |
+                  SM(0, AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX) |
+                  SM(200, AR_PHY_AIC_CAL_SYNTH_SETTLING)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_3_B0,
+                 (SM(2, AR_PHY_AIC_MON_MAX_HOP_COUNT) |
+                  SM(1, AR_PHY_AIC_MON_MIN_STALE_COUNT) |
+                  SM(1, AR_PHY_AIC_MON_PWR_EST_LONG) |
+                  SM(2, AR_PHY_AIC_MON_PD_TALLY_SCALING) |
+                  SM(10, AR_PHY_AIC_MON_PERF_THR) |
+                  SM(2, AR_PHY_AIC_CAL_TARGET_MAG_SETTING) |
+                  SM(1, AR_PHY_AIC_CAL_PERF_CHECK_FACTOR) |
+                  SM(1, AR_PHY_AIC_CAL_PWR_EST_LONG)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B0,
+                 (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
+                  SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
+                  SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
+                  SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
+                  SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
+
+       REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B1,
+                 (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
+                  SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
+                  SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
+                  SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
+                  SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
+
+       ar9003_aic_gain_table(ah);
+
+       /* Need to enable AIC reference signal in BT modem. */
+       REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
+                 (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) |
+                  ATH_AIC_BT_AIC_ENABLE));
+
+       aic->aic_cal_start_time = REG_READ(ah, AR_TSF_L32);
+
+       /* Start calibration */
+       REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+       REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_CH_VALID_RESET);
+       REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+
+       aic->aic_caled_chan = 0;
+       aic->aic_cal_state = AIC_CAL_STATE_STARTED;
+
+       return aic->aic_cal_state;
+}
+
+/*
+ * Post-process raw AIC calibration results.
+ *
+ * aic->aic_sram[] holds one 32-bit SRAM word per BT channel as read back
+ * from hardware.  This routine unpacks those words, converts the
+ * attenuation settings of every successfully calibrated channel into
+ * signed linear gains, reconstructs channels that failed to calibrate by
+ * interpolating/extrapolating from neighbouring valid channels, and
+ * finally packs the results back into aic->aic_sram[] in hardware SRAM
+ * word format.
+ *
+ * Returns true on success, false when too few valid channels exist to
+ * reconstruct the missing ones.
+ */
+static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+       struct ath_aic_sram_info cal_sram[ATH_AIC_MAX_BT_CHANNEL];
+       struct ath_aic_out_info aic_sram[ATH_AIC_MAX_BT_CHANNEL];
+       u32 dir_path_gain_idx, quad_path_gain_idx, value;
+       u32 fixed_com_att_db;
+       int8_t dir_path_sign, quad_path_sign;
+       int16_t i;
+       bool ret = true;
+
+       memset(&cal_sram, 0, sizeof(cal_sram));
+       memset(&aic_sram, 0, sizeof(aic_sram));
+
+       /*
+        * Pass 1: unpack each SRAM word; for valid channels translate the
+        * rotation + common attenuation indices into signed linear gains
+        * via the lookup tables.
+        */
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               value = aic->aic_sram[i];
+
+               cal_sram[i].valid =
+                       MS(value, AR_PHY_AIC_SRAM_VALID);
+               cal_sram[i].rot_quad_att_db =
+                       MS(value, AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB);
+               cal_sram[i].vga_quad_sign =
+                       MS(value, AR_PHY_AIC_SRAM_VGA_QUAD_SIGN);
+               cal_sram[i].rot_dir_att_db =
+                       MS(value, AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB);
+               cal_sram[i].vga_dir_sign =
+                       MS(value, AR_PHY_AIC_SRAM_VGA_DIR_SIGN);
+               cal_sram[i].com_att_6db =
+                       MS(value, AR_PHY_AIC_SRAM_COM_ATT_6DB);
+
+               if (cal_sram[i].valid) {
+                       /* Total attenuation index = rotation + common part. */
+                       dir_path_gain_idx = cal_sram[i].rot_dir_att_db +
+                               com_att_db_table[cal_sram[i].com_att_6db];
+                       quad_path_gain_idx = cal_sram[i].rot_quad_att_db +
+                               com_att_db_table[cal_sram[i].com_att_6db];
+
+                       /* Sign bit set means positive gain. */
+                       dir_path_sign = (cal_sram[i].vga_dir_sign) ? 1 : -1;
+                       quad_path_sign = (cal_sram[i].vga_quad_sign) ? 1 : -1;
+
+                       aic_sram[i].dir_path_gain_lin = dir_path_sign *
+                               aic_lin_table[dir_path_gain_idx];
+                       aic_sram[i].quad_path_gain_lin = quad_path_sign *
+                               aic_lin_table[quad_path_gain_idx];
+               }
+       }
+
+       /*
+        * Pass 2: fill in channels that failed calibration from their
+        * nearest valid neighbours.  The second argument of
+        * ar9003_aic_find_valid() selects the search direction
+        * (presumably 0 = downward, 1 = upward -- confirm against its
+        * definition, which is outside this chunk).
+        */
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               int16_t start_idx, end_idx;
+
+               if (cal_sram[i].valid)
+                       continue;
+
+               start_idx = ar9003_aic_find_valid(cal_sram, 0, i);
+               end_idx = ar9003_aic_find_valid(cal_sram, 1, i);
+
+               if (start_idx < 0) {
+                       /* extrapolation */
+                       /* No valid channel below i: use the two nearest
+                        * valid channels above it instead.  The
+                        * "(end_idx - i) >> 1" term rounds the division
+                        * to nearest. */
+                       start_idx = end_idx;
+                       end_idx = ar9003_aic_find_valid(cal_sram, 1, start_idx);
+
+                       if (end_idx < 0) {
+                               /* Fewer than two valid channels: give up. */
+                               ret = false;
+                               break;
+                       }
+
+                       aic_sram[i].dir_path_gain_lin =
+                               ((aic_sram[start_idx].dir_path_gain_lin -
+                                 aic_sram[end_idx].dir_path_gain_lin) *
+                                (start_idx - i) + ((end_idx - i) >> 1)) /
+                               (end_idx - i) +
+                               aic_sram[start_idx].dir_path_gain_lin;
+                       aic_sram[i].quad_path_gain_lin =
+                               ((aic_sram[start_idx].quad_path_gain_lin -
+                                 aic_sram[end_idx].quad_path_gain_lin) *
+                                (start_idx - i) + ((end_idx - i) >> 1)) /
+                               (end_idx - i) +
+                               aic_sram[start_idx].quad_path_gain_lin;
+
+                       /* NOTE(review): there is no "continue" here, and
+                        * start_idx/end_idx are both valid at this point,
+                        * so the interpolation branch below recomputes
+                        * (and overwrites) the values just computed, with
+                        * slightly different rounding.  Confirm the
+                        * fall-through is intentional. */
+               }
+
+               if (end_idx < 0) {
+                       /* extrapolation */
+                       /* No valid channel above i: extrapolate downward
+                        * from start_idx and the next valid channel below
+                        * it. */
+                       end_idx = ar9003_aic_find_valid(cal_sram, 0, start_idx);
+
+                       if (end_idx < 0) {
+                               ret = false;
+                               break;
+                       }
+
+                       aic_sram[i].dir_path_gain_lin =
+                               ((aic_sram[start_idx].dir_path_gain_lin -
+                                 aic_sram[end_idx].dir_path_gain_lin) *
+                                (i - start_idx) + ((start_idx - end_idx) >> 1)) /
+                               (start_idx - end_idx) +
+                               aic_sram[start_idx].dir_path_gain_lin;
+                       aic_sram[i].quad_path_gain_lin =
+                               ((aic_sram[start_idx].quad_path_gain_lin -
+                                 aic_sram[end_idx].quad_path_gain_lin) *
+                                (i - start_idx) + ((start_idx - end_idx) >> 1)) /
+                               (start_idx - end_idx) +
+                               aic_sram[start_idx].quad_path_gain_lin;
+
+               } else if (start_idx >= 0){
+                       /* interpolation */
+                       /* Both neighbours valid: linear interpolation,
+                        * rounded to nearest by "(end - start) >> 1". */
+                       aic_sram[i].dir_path_gain_lin =
+                               (((end_idx - i) * aic_sram[start_idx].dir_path_gain_lin) +
+                                ((i - start_idx) * aic_sram[end_idx].dir_path_gain_lin) +
+                                ((end_idx - start_idx) >> 1)) /
+                               (end_idx - start_idx);
+                       aic_sram[i].quad_path_gain_lin =
+                               (((end_idx - i) * aic_sram[start_idx].quad_path_gain_lin) +
+                                ((i - start_idx) * aic_sram[end_idx].quad_path_gain_lin) +
+                                ((end_idx - start_idx) >> 1))/
+                               (end_idx - start_idx);
+               }
+       }
+
+       /* From dir/quad_path_gain_lin to sram. */
+       /* The common attenuation of the first valid channel is applied to
+        * all channels below; if none is valid fall back to channel 0 and
+        * flag failure. */
+       i = ar9003_aic_find_valid(cal_sram, 1, 0);
+       if (i < 0) {
+               i = 0;
+               ret = false;
+       }
+       fixed_com_att_db = com_att_db_table[cal_sram[i].com_att_6db];
+
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               int16_t rot_dir_path_att_db, rot_quad_path_att_db;
+
+               /* Sign bit: 1 for non-negative gain. */
+               aic_sram[i].sram.vga_dir_sign =
+                       (aic_sram[i].dir_path_gain_lin >= 0) ? 1 : 0;
+               aic_sram[i].sram.vga_quad_sign=
+                       (aic_sram[i].quad_path_gain_lin >= 0) ? 1 : 0;
+
+               /* Derive the rotation attenuation from |gain| minus the
+                * fixed common part, clamped to the supported range. */
+               rot_dir_path_att_db =
+                       ar9003_aic_find_index(0, abs(aic_sram[i].dir_path_gain_lin)) -
+                       fixed_com_att_db;
+               rot_quad_path_att_db =
+                       ar9003_aic_find_index(0, abs(aic_sram[i].quad_path_gain_lin)) -
+                       fixed_com_att_db;
+
+               aic_sram[i].sram.com_att_6db =
+                       ar9003_aic_find_index(1, fixed_com_att_db);
+
+               aic_sram[i].sram.valid = 1;
+
+               aic_sram[i].sram.rot_dir_att_db =
+                       min(max(rot_dir_path_att_db,
+                               (int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB),
+                           ATH_AIC_MAX_ROT_DIR_ATT_DB);
+               aic_sram[i].sram.rot_quad_att_db =
+                       min(max(rot_quad_path_att_db,
+                               (int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB),
+                           ATH_AIC_MAX_ROT_QUAD_ATT_DB);
+       }
+
+       /* Pass 3: repack the final settings into hardware SRAM words. */
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               aic->aic_sram[i] = (SM(aic_sram[i].sram.vga_dir_sign,
+                                      AR_PHY_AIC_SRAM_VGA_DIR_SIGN) |
+                                   SM(aic_sram[i].sram.vga_quad_sign,
+                                      AR_PHY_AIC_SRAM_VGA_QUAD_SIGN) |
+                                   SM(aic_sram[i].sram.com_att_6db,
+                                      AR_PHY_AIC_SRAM_COM_ATT_6DB) |
+                                   SM(aic_sram[i].sram.valid,
+                                      AR_PHY_AIC_SRAM_VALID) |
+                                   SM(aic_sram[i].sram.rot_dir_att_db,
+                                      AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB) |
+                                   SM(aic_sram[i].sram.rot_quad_att_db,
+                                      AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB));
+       }
+
+       return ret;
+}
+
+/*
+ * Finish a calibration run: turn the AIC reference signal in the BT
+ * modem back off and post-process the collected results, moving the
+ * state machine to DONE on success or ERROR on failure.
+ */
+static void ar9003_aic_cal_done(struct ath_hw *ah)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+
+       /* Disable AIC reference signal in BT modem. */
+       REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
+                 (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) &
+                  ~ATH_AIC_BT_AIC_ENABLE));
+
+       if (ar9003_aic_cal_post_process(ah))
+               aic->aic_cal_state = AIC_CAL_STATE_DONE;
+       else
+               aic->aic_cal_state = AIC_CAL_STATE_ERROR;
+}
+
+/*
+ * Poll for completion of a running AIC calibration and harvest the
+ * per-channel results from the B1 SRAM.
+ *
+ * With @cal_once all channels are calibrated in one run: busy-wait for
+ * the hardware to clear CAL_ENABLE, collect every channel and finish.
+ * Otherwise only newly valid channels are recorded and calibration is
+ * restarted until num_chan channels have completed.
+ *
+ * Returns the resulting calibration state.
+ */
+static u8 ar9003_aic_cal_continue(struct ath_hw *ah, bool cal_once)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+       int i, num_chan;
+
+       num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
+
+       if (!num_chan) {
+               aic->aic_cal_state = AIC_CAL_STATE_ERROR;
+               return aic->aic_cal_state;
+       }
+
+       if (cal_once) {
+               /* Up to 10000 * 100us (~1s) for CAL_ENABLE to clear. */
+               for (i = 0; i < 10000; i++) {
+                       if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
+                            AR_PHY_AIC_CAL_ENABLE) == 0)
+                               break;
+
+                       udelay(100);
+               }
+       }
+
+       /*
+        * Use AR_PHY_AIC_CAL_ENABLE bit instead of AR_PHY_AIC_CAL_DONE.
+        * Sometimes CAL_DONE bit is not asserted.
+        */
+       if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
+            AR_PHY_AIC_CAL_ENABLE) != 0) {
+               /* NOTE(review): the poll above waits up to ~1s but the
+                * message says 40ms -- confirm which is intended. */
+               ath_dbg(common, MCI, "AIC cal is not done after 40ms");
+               goto exit;
+       }
+
+       /* Point the SRAM port at the calibration results; subsequent
+        * data reads auto-increment through consecutive channels. */
+       REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1,
+                 (ATH_AIC_SRAM_CAL_OFFSET | ATH_AIC_SRAM_AUTO_INCREMENT));
+
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               u32 value;
+
+               value = REG_READ(ah, AR_PHY_AIC_SRAM_DATA_B1);
+
+               /* Bit 0 flags a channel with valid calibration data. */
+               if (value & 0x01) {
+                       if (aic->aic_sram[i] == 0)
+                               aic->aic_caled_chan++;
+
+                       aic->aic_sram[i] = value;
+
+                       if (!cal_once)
+                               break;
+               }
+       }
+
+       if ((aic->aic_caled_chan >= num_chan) || cal_once) {
+               ar9003_aic_cal_done(ah);
+       } else {
+               /* Start calibration */
+               REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+               REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1,
+                           AR_PHY_AIC_CAL_CH_VALID_RESET);
+               REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+       }
+exit:
+       return aic->aic_cal_state;
+
+}
+
+/*
+ * Advance the AIC calibration state machine by one step.
+ *
+ * IDLE kicks off a single-channel calibration, STARTED polls/collects
+ * results, DONE is sticky.  ERROR (and any unknown state) falls through
+ * to the default case and returns AIC_CAL_STATE_ERROR via the initial
+ * value of cal_ret.
+ */
+u8 ar9003_aic_calibration(struct ath_hw *ah)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+       u8 cal_ret = AIC_CAL_STATE_ERROR;
+
+       switch (aic->aic_cal_state) {
+       case AIC_CAL_STATE_IDLE:
+               cal_ret = ar9003_aic_cal_start(ah, 1);
+               break;
+       case AIC_CAL_STATE_STARTED:
+               cal_ret = ar9003_aic_cal_continue(ah, false);
+               break;
+       case AIC_CAL_STATE_DONE:
+               cal_ret = AIC_CAL_STATE_DONE;
+               break;
+       default:
+               break;
+       }
+
+       return cal_ret;
+}
+
+/*
+ * Enable AIC for normal operation using previously calibrated values.
+ *
+ * Reloads the gain table, writes the per-channel SRAM words back to
+ * hardware and programs the enable registers.
+ *
+ * Returns 0 on success, 1 if calibration has not completed.
+ */
+u8 ar9003_aic_start_normal(struct ath_hw *ah)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+       int16_t i;
+
+       if (aic->aic_cal_state != AIC_CAL_STATE_DONE)
+               return 1;
+
+       ar9003_aic_gain_table(ah);
+
+       /* Write the calibrated words starting at SRAM offset 0, with
+        * address auto-increment. */
+       REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1, ATH_AIC_SRAM_AUTO_INCREMENT);
+
+       for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+               REG_WRITE(ah, AR_PHY_AIC_SRAM_DATA_B1, aic->aic_sram[i]);
+       }
+
+       /* FIXME: Replace these with proper register names */
+       REG_WRITE(ah, 0xa6b0, 0x80);
+       REG_WRITE(ah, 0xa6b4, 0x5b2df0);
+       REG_WRITE(ah, 0xa6b8, 0x10762cc8);
+       REG_WRITE(ah, 0xa6bc, 0x1219a4b);
+       REG_WRITE(ah, 0xa6c0, 0x1e01);
+       REG_WRITE(ah, 0xb6b4, 0xf0);
+       REG_WRITE(ah, 0xb6c0, 0x1e01);
+       REG_WRITE(ah, 0xb6b0, 0x81);
+       REG_WRITE(ah, AR_PHY_65NM_CH1_RXTX4, 0x40000000);
+
+       aic->aic_enabled = true;
+
+       return 0;
+}
+
+/* Reset the AIC calibration state machine to IDLE and return the new
+ * state. */
+u8 ar9003_aic_cal_reset(struct ath_hw *ah)
+{
+       struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+
+       aic->aic_cal_state = AIC_CAL_STATE_IDLE;
+       return aic->aic_cal_state;
+}
+
+/*
+ * Run one complete calibration pass over all configured BT channels:
+ * kick off calibration and immediately poll for / collect the results.
+ * The return value of ar9003_aic_cal_start() is intentionally ignored;
+ * ar9003_aic_cal_continue() reports the final state.
+ */
+u8 ar9003_aic_calibration_single(struct ath_hw *ah)
+{
+       struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+       u8 cal_ret;
+       int num_chan;
+
+       num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
+
+       (void) ar9003_aic_cal_start(ah, num_chan);
+       cal_ret = ar9003_aic_cal_continue(ah, true);
+
+       return cal_ret;
+}
+
+/* Hook the AIC query callback into the hardware private ops table. */
+void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
+{
+       struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+       priv_ops->is_aic_enabled = ar9003_hw_is_aic_enabled;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.h b/drivers/net/wireless/ath/ath9k/ar9003_aic.h
new file mode 100644 (file)
index 0000000..86f4064
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_AIC_H
+#define AR9003_AIC_H
+
+#define ATH_AIC_MAX_COM_ATT_DB_TABLE    6 /* entries in com_att_db_table[] */
+#define ATH_AIC_MAX_AIC_LIN_TABLE       69 /* entries in aic_lin_table[] */
+#define ATH_AIC_MIN_ROT_DIR_ATT_DB      0 /* clamp range for rot_dir_att_db */
+#define ATH_AIC_MIN_ROT_QUAD_ATT_DB     0 /* clamp range for rot_quad_att_db */
+#define ATH_AIC_MAX_ROT_DIR_ATT_DB      37
+#define ATH_AIC_MAX_ROT_QUAD_ATT_DB     37
+#define ATH_AIC_SRAM_AUTO_INCREMENT     0x80000000 /* SRAM addr auto-advances on data access */
+#define ATH_AIC_SRAM_GAIN_TABLE_OFFSET  0x280 /* presumably gain-table base -- not used in this file chunk */
+#define ATH_AIC_SRAM_CAL_OFFSET         0x140 /* base of per-channel cal results */
+#define ATH_AIC_SRAM_OFFSET             0x00
+#define ATH_AIC_MEAS_MAG_THRESH         20
+#define ATH_AIC_BT_JUPITER_CTRL         0x66820 /* BT modem ctrl reg (AIC ref signal) */
+#define ATH_AIC_BT_AIC_ENABLE           0x02 /* AIC reference signal enable bit */
+
+enum aic_cal_state { /* AIC calibration state machine */
+       AIC_CAL_STATE_IDLE = 0,
+       AIC_CAL_STATE_STARTED, /* calibration running, results pending */
+       AIC_CAL_STATE_DONE, /* results post-processed successfully */
+       AIC_CAL_STATE_ERROR
+};
+
+struct ath_aic_sram_info { /* unpacked per-channel AIC SRAM word */
+       bool valid:1; /* channel has calibration data */
+       bool vga_quad_sign:1; /* quadrature-path gain sign (1 = positive) */
+       bool vga_dir_sign:1; /* direct-path gain sign (1 = positive) */
+       u8 rot_quad_att_db; /* rotation attenuation, quadrature path */
+       u8 rot_dir_att_db; /* rotation attenuation, direct path */
+       u8 com_att_6db; /* common attenuation index (com_att_db_table) */
+};
+
+struct ath_aic_out_info { /* post-processed calibration result */
+       int16_t dir_path_gain_lin; /* signed linear gain, direct path */
+       int16_t quad_path_gain_lin; /* signed linear gain, quadrature path */
+       struct ath_aic_sram_info sram; /* repacked hardware settings */
+};
+
+u8 ar9003_aic_calibration(struct ath_hw *ah);
+u8 ar9003_aic_start_normal(struct ath_hw *ah);
+u8 ar9003_aic_cal_reset(struct ath_hw *ah);
+u8 ar9003_aic_calibration_single(struct ath_hw *ah);
+
+#endif /* AR9003_AIC_H */
index 4335ccbe7d7e078537216301a4fdb2120d3ee7fb..79fd3b2dcbdef9fc117b99177cb44fe46a581ae4 100644 (file)
@@ -195,16 +195,16 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
                               ar9485_1_1_baseband_core_txfir_coeff_japan_2484);
 
-               if (ah->config.no_pll_pwrsave) {
+               if (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) {
                        INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                                      ar9485_1_1_pcie_phy_clkreq_disable_L1);
+                                      ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
                        INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                                      ar9485_1_1_pcie_phy_clkreq_disable_L1);
+                                      ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
                } else {
                        INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                                      ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
+                                      ar9485_1_1_pcie_phy_clkreq_disable_L1);
                        INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                                      ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
+                                      ar9485_1_1_pcie_phy_clkreq_disable_L1);
                }
        } else if (AR_SREV_9462_21(ah)) {
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -231,10 +231,20 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                               ar9462_2p1_modes_fast_clock);
                INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
                               ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
-               INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                              ar9462_2p1_pciephy_clkreq_disable_L1);
-               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                              ar9462_2p1_pciephy_clkreq_disable_L1);
+
+               /* Awake -> Sleep Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                                      ar9462_2p1_pciephy_clkreq_disable_L1);
+               }
+
+               /* Sleep -> Awake Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                                      ar9462_2p1_pciephy_clkreq_disable_L1);
+               }
        } else if (AR_SREV_9462_20(ah)) {
 
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
@@ -262,11 +272,18 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                                ar9462_2p0_common_rx_gain);
 
                /* Awake -> Sleep Setting */
-               INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                              ar9462_2p0_pciephy_clkreq_disable_L1);
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                                      ar9462_2p0_pciephy_clkreq_disable_L1);
+               }
+
                /* Sleep -> Awake Setting */
-               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                              ar9462_2p0_pciephy_clkreq_disable_L1);
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                                      ar9462_2p0_pciephy_clkreq_disable_L1);
+               }
 
                /* Fast clock modal settings */
                INIT_INI_ARRAY(&ah->iniModesFastClock,
@@ -456,10 +473,19 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                               ar9565_1p1_Modes_lowest_ob_db_tx_gain_table);
 
-               INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                              ar9565_1p1_pciephy_clkreq_disable_L1);
-               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                              ar9565_1p1_pciephy_clkreq_disable_L1);
+               /* Awake -> Sleep Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                                      ar9565_1p1_pciephy_clkreq_disable_L1);
+               }
+
+               /* Sleep -> Awake Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                                      ar9565_1p1_pciephy_clkreq_disable_L1);
+               }
 
                INIT_INI_ARRAY(&ah->iniModesFastClock,
                                ar9565_1p1_modes_fast_clock);
@@ -491,10 +517,19 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                               ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
 
-               INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                              ar9565_1p0_pciephy_clkreq_disable_L1);
-               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                              ar9565_1p0_pciephy_clkreq_disable_L1);
+               /* Awake -> Sleep Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                                      ar9565_1p0_pciephy_clkreq_disable_L1);
+               }
+
+               /* Sleep -> Awake Setting */
+               if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+                   (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+                       INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                                      ar9565_1p0_pciephy_clkreq_disable_L1);
+               }
 
                INIT_INI_ARRAY(&ah->iniModesFastClock,
                                ar9565_1p0_modes_fast_clock);
@@ -1130,6 +1165,12 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
        struct ath_hw_ops *ops = ath9k_hw_ops(ah);
 
        ar9003_hw_init_mode_regs(ah);
+
+       if (AR_SREV_9003_PCOEM(ah)) {
+               WARN_ON(!ah->iniPcieSerdes.ia_array);
+               WARN_ON(!ah->iniPcieSerdesLowPower.ia_array);
+       }
+
        priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
        priv_ops->init_hang_checks = ar9003_hw_init_hang_checks;
        priv_ops->detect_mac_hang = ar9003_hw_detect_mac_hang;
@@ -1139,4 +1180,5 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
        ar9003_hw_attach_phy_ops(ah);
        ar9003_hw_attach_calib_ops(ah);
        ar9003_hw_attach_mac_ops(ah);
+       ar9003_hw_attach_aic_ops(ah);
 }
index 7b94a6c7db3d50dd4feb53dd74e1973bd68f2e6d..af5ee416a560dab726805c58850cddfe65291e03 100644 (file)
@@ -19,6 +19,7 @@
 #include "hw-ops.h"
 #include "ar9003_phy.h"
 #include "ar9003_mci.h"
+#include "ar9003_aic.h"
 
 static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
 {
@@ -284,12 +285,12 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
                  AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
        REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI);
 
-       if (mci->is_2g) {
+       if (mci->is_2g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
                ar9003_mci_send_lna_transfer(ah, true);
                udelay(5);
        }
 
-       if ((mci->is_2g && !mci->update_2g5g)) {
+       if (mci->is_2g && !mci->update_2g5g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
                if (ar9003_mci_wait_for_interrupt(ah,
                                        AR_MCI_INTERRUPT_RX_MSG_RAW,
                                        AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
@@ -593,7 +594,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
                if (!time_out)
                        break;
 
-               offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
+               offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
 
                if (offset == MCI_GPM_INVALID)
                        continue;
@@ -657,7 +658,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
                time_out = 0;
 
        while (more_data == MCI_GPM_MORE) {
-               offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
+               offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
                if (offset == MCI_GPM_INVALID)
                        break;
 
@@ -771,8 +772,14 @@ exit:
 
 static void ar9003_mci_mute_bt(struct ath_hw *ah)
 {
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
        /* disable all MCI messages */
        REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
        REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
 
        /* wait pending HW messages to flush out */
@@ -783,9 +790,10 @@ static void ar9003_mci_mute_bt(struct ath_hw *ah)
         * 1. reset not after resuming from full sleep
         * 2. before reset MCI RX, to quiet BT and avoid MCI RX misalignment
         */
-       ar9003_mci_send_lna_take(ah, true);
-
-       udelay(5);
+       if (MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
+               ar9003_mci_send_lna_take(ah, true);
+               udelay(5);
+       }
 
        ar9003_mci_send_sys_sleeping(ah, true);
 }
@@ -821,6 +829,80 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
                      AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
 }
 
+static void ar9003_mci_stat_setup(struct ath_hw *ah) /* MCI debug-stat counters, AR9565 only */
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+       if (!AR_SREV_9565(ah)) /* other chips lack these counters */
+               return;
+
+       if (mci->config & ATH_MCI_CONFIG_MCI_STAT_DBG) {
+               REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+                             AR_MCI_DBG_CNT_CTRL_ENABLE, 1); /* enable counters */
+               REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+                             AR_MCI_DBG_CNT_CTRL_BT_LINKID,
+                             MCI_STAT_ALL_BT_LINKID); /* count all BT link IDs */
+       } else {
+               REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+                             AR_MCI_DBG_CNT_CTRL_ENABLE, 0); /* stats off by default */
+       }
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9565_1ANT(struct ath_hw *ah) /* AR9565, single antenna shared with BT */
+{
+       u32 regval;
+
+       regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+                SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+                SM(1, AR_BTCOEX_CTRL_PA_SHARED) | /* PA and LNA shared with BT */
+                SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+                SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) | /* one antenna */
+                SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+                SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+       REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+                     AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1); /* TX on chain 0 only */
+       REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9565_2ANT(struct ath_hw *ah) /* AR9565, two antennas, no PA/LNA sharing */
+{
+       u32 regval;
+
+       regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+                SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+                SM(0, AR_BTCOEX_CTRL_PA_SHARED) | /* dedicated BT antenna */
+                SM(0, AR_BTCOEX_CTRL_LNA_SHARED) |
+                SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) | /* two antennas */
+                SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+                SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+       REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+                     AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x0);
+       REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9462(struct ath_hw *ah) /* AR9462: two antennas, PA/LNA shared */
+{ /* NOTE(review): the first regval line below is space-indented (checkpatch) */
+       u32 regval;
+
+        regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+                SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+                SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+                SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+                SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+                SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) | /* both RX chains */
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+                SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+       REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
 int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
                     bool is_full_sleep)
 {
@@ -831,11 +913,6 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
        ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
                is_full_sleep, is_2g);
 
-       if (!mci->gpm_addr && !mci->sched_addr) {
-               ath_err(common, "MCI GPM and schedule buffers are not allocated\n");
-               return -ENOMEM;
-       }
-
        if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
                ath_err(common, "BTCOEX control register is dead\n");
                return -EINVAL;
@@ -850,26 +927,17 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
        * To avoid MCI state machine be affected by incoming remote MCI msgs,
        * MCI mode will be enabled later, right before reset the MCI TX and RX.
        */
-
-       regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
-                SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
-                SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
-                SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
-                SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
-                SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
-                SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
        if (AR_SREV_9565(ah)) {
-               regval |= SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
-                         SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
-               REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
-                             AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
+               u8 ant = MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH);
+
+               if (ant == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED)
+                       ar9003_mci_set_btcoex_ctrl_9565_1ANT(ah);
+               else
+                       ar9003_mci_set_btcoex_ctrl_9565_2ANT(ah);
        } else {
-               regval |= SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
-                         SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
+               ar9003_mci_set_btcoex_ctrl_9462(ah);
        }
 
-       REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
-
        if (is_2g && !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
                ar9003_mci_osla_setup(ah, true);
        else
@@ -926,26 +994,32 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
        regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
        REG_WRITE(ah, AR_MCI_COMMAND2, regval);
 
-       ar9003_mci_get_next_gpm_offset(ah, true, NULL);
+       /* Init GPM offset after MCI Reset Rx */
+       ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
 
        REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
                  (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
                   SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
 
-       REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
-                   AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+       if (MCI_ANT_ARCH_PA_LNA_SHARED(mci))
+               REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+                           AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+       else
+               REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+                           AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
 
        ar9003_mci_observation_set_up(ah);
 
        mci->ready = true;
        ar9003_mci_prep_interface(ah);
+       ar9003_mci_stat_setup(ah);
 
-       if (AR_SREV_9565(ah))
-               REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
-                             AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
        if (en_int)
                ar9003_mci_enable_interrupt(ah);
 
+       if (ath9k_hw_is_aic_enabled(ah))
+               ar9003_aic_start_normal(ah);
+
        return 0;
 }
 
@@ -1218,6 +1292,14 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
                }
                value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
                break;
+       case MCI_STATE_INIT_GPM_OFFSET:
+               value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+
+               if (value < mci->gpm_len)
+                       mci->gpm_idx = value;
+               else
+                       mci->gpm_idx = 0;
+               break;
        case MCI_STATE_LAST_SCHD_MSG_OFFSET:
                value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
                                    AR_MCI_RX_LAST_SCHD_MSG_INDEX);
@@ -1284,6 +1366,22 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
                value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
                mci->need_flush_btinfo = false;
                break;
+       case MCI_STATE_AIC_CAL:
+               if (ath9k_hw_is_aic_enabled(ah))
+                       value = ar9003_aic_calibration(ah);
+               break;
+       case MCI_STATE_AIC_START:
+               if (ath9k_hw_is_aic_enabled(ah))
+                       ar9003_aic_start_normal(ah);
+               break;
+       case MCI_STATE_AIC_CAL_RESET:
+               if (ath9k_hw_is_aic_enabled(ah))
+                       value = ar9003_aic_cal_reset(ah);
+               break;
+       case MCI_STATE_AIC_CAL_SINGLE:
+               if (ath9k_hw_is_aic_enabled(ah))
+                       value = ar9003_aic_calibration_single(ah);
+               break;
        default:
                break;
        }
@@ -1364,21 +1462,11 @@ void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
        mci->gpm_idx = 0;
 }
 
-u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more)
 {
        struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
        u32 offset, more_gpm = 0, gpm_ptr;
 
-       if (first) {
-               gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
-
-               if (gpm_ptr >= mci->gpm_len)
-                       gpm_ptr = 0;
-
-               mci->gpm_idx = gpm_ptr;
-               return gpm_ptr;
-       }
-
        /*
         * This could be useful to avoid new GPM message interrupt which
         * may lead to spurious interrupt after power sleep, or multiple
index 66d7ab9f920dbccf15739e4511ae3089330acda4..e288611c12d50de2959c8a0178597c60097ae6b9 100644 (file)
@@ -92,14 +92,36 @@ enum mci_gpm_coex_bt_update_flags_op {
 #define ATH_MCI_CONFIG_CLK_DIV              0x00003000
 #define ATH_MCI_CONFIG_CLK_DIV_S            12
 #define ATH_MCI_CONFIG_DISABLE_TUNING       0x00004000
+#define ATH_MCI_CONFIG_DISABLE_AIC          0x00008000
+#define ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN     0x007f0000
+#define ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN_S   16
+#define ATH_MCI_CONFIG_NO_QUIET_ACK         0x00800000
+#define ATH_MCI_CONFIG_NO_QUIET_ACK_S       23
+#define ATH_MCI_CONFIG_ANT_ARCH             0x07000000
+#define ATH_MCI_CONFIG_ANT_ARCH_S           24
+#define ATH_MCI_CONFIG_FORCE_QUIET_ACK      0x08000000
+#define ATH_MCI_CONFIG_FORCE_QUIET_ACK_S    27
+#define ATH_MCI_CONFIG_FORCE_2CHAIN_ACK     0x10000000
+#define ATH_MCI_CONFIG_MCI_STAT_DBG         0x20000000
 #define ATH_MCI_CONFIG_MCI_WEIGHT_DBG       0x40000000
 #define ATH_MCI_CONFIG_DISABLE_MCI          0x80000000
 
 #define ATH_MCI_CONFIG_MCI_OBS_MASK     (ATH_MCI_CONFIG_MCI_OBS_MCI  | \
                                         ATH_MCI_CONFIG_MCI_OBS_TXRX | \
                                         ATH_MCI_CONFIG_MCI_OBS_BT)
+
 #define ATH_MCI_CONFIG_MCI_OBS_GPIO     0x0000002F
 
+#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_NON_SHARED 0x00
+#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED     0x01
+#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_NON_SHARED 0x02
+#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED     0x03
+#define ATH_MCI_ANT_ARCH_3_ANT                   0x04
+
+#define MCI_ANT_ARCH_PA_LNA_SHARED(mci)                                        \
+       ((MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED) || \
+        (MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED))
+
 enum mci_message_header {              /* length of payload */
        MCI_LNA_CTRL     = 0x10,        /* len = 0 */
        MCI_CONT_NACK    = 0x20,        /* len = 0 */
@@ -188,20 +210,55 @@ enum mci_bt_state {
        MCI_BT_CAL
 };
 
+enum mci_ps_state {
+       MCI_PS_DISABLE,
+       MCI_PS_ENABLE,
+       MCI_PS_ENABLE_OFF,
+       MCI_PS_ENABLE_ON
+};
+
 /* Type of state query */
 enum mci_state_type {
        MCI_STATE_ENABLE,
+       MCI_STATE_INIT_GPM_OFFSET,
+       MCI_STATE_CHECK_GPM_OFFSET,
+       MCI_STATE_NEXT_GPM_OFFSET,
+       MCI_STATE_LAST_GPM_OFFSET,
+       MCI_STATE_BT,
+       MCI_STATE_SET_BT_SLEEP,
        MCI_STATE_SET_BT_AWAKE,
+       MCI_STATE_SET_BT_CAL_START,
+       MCI_STATE_SET_BT_CAL,
        MCI_STATE_LAST_SCHD_MSG_OFFSET,
        MCI_STATE_REMOTE_SLEEP,
+       MCI_STATE_CONT_STATUS,
        MCI_STATE_RESET_REQ_WAKE,
        MCI_STATE_SEND_WLAN_COEX_VERSION,
+       MCI_STATE_SET_BT_COEX_VERSION,
+       MCI_STATE_SEND_WLAN_CHANNELS,
        MCI_STATE_SEND_VERSION_QUERY,
        MCI_STATE_SEND_STATUS_QUERY,
+       MCI_STATE_NEED_FLUSH_BT_INFO,
+       MCI_STATE_SET_CONCUR_TX_PRI,
        MCI_STATE_RECOVER_RX,
        MCI_STATE_NEED_FTP_STOMP,
+       MCI_STATE_NEED_TUNING,
+       MCI_STATE_NEED_STAT_DEBUG,
+       MCI_STATE_SHARED_CHAIN_CONCUR_TX,
+       MCI_STATE_AIC_CAL,
+       MCI_STATE_AIC_START,
+       MCI_STATE_AIC_CAL_RESET,
+       MCI_STATE_AIC_CAL_SINGLE,
+       MCI_STATE_IS_AR9462,
+       MCI_STATE_IS_AR9565_1ANT,
+       MCI_STATE_IS_AR9565_2ANT,
+       MCI_STATE_WLAN_WEAK_SIGNAL,
+       MCI_STATE_SET_WLAN_PS_STATE,
+       MCI_STATE_GET_WLAN_PS_STATE,
        MCI_STATE_DEBUG,
-       MCI_STATE_NEED_FLUSH_BT_INFO,
+       MCI_STATE_STAT_DEBUG,
+       MCI_STATE_ALLOW_FCS,
+       MCI_STATE_SET_2G_CONTENTION,
        MCI_STATE_MAX
 };
 
@@ -255,7 +312,7 @@ int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
 void ar9003_mci_cleanup(struct ath_hw *ah);
 void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
                              u32 *rx_msg_intr);
-u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more);
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more);
 void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor);
 void ar9003_mci_send_wlan_channels(struct ath_hw *ah);
 /*
index c311b2bfdb004a68ebfef3f7ccba229b056216b3..fc595b92ac56007a024bc9cb5d871e19cef4ba9c 100644 (file)
 #define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE                0x0000ff00
 #define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S      8
 
-/* AIC Registers */
-#define AR_PHY_AIC_CTRL_0_B0   (AR_SM_BASE + 0x4b0)
-#define AR_PHY_AIC_CTRL_1_B0   (AR_SM_BASE + 0x4b4)
-#define AR_PHY_AIC_CTRL_2_B0   (AR_SM_BASE + 0x4b8)
-#define AR_PHY_AIC_CTRL_3_B0   (AR_SM_BASE + 0x4bc)
-#define AR_PHY_AIC_STAT_0_B0   (AR_SM_BASE + 0x4c4))
-#define AR_PHY_AIC_STAT_1_B0   (AR_SM_BASE + 0x4c8))
-#define AR_PHY_AIC_CTRL_4_B0   (AR_SM_BASE + 0x4c0)
-#define AR_PHY_AIC_STAT_2_B0   (AR_SM_BASE + 0x4cc)
-
 #define AR_PHY_65NM_CH0_TXRF3       0x16048
 #define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G         0x0000001e
 #define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S       1
 #define AR_PHY_TX_IQCAL_STATUS_B1   (AR_SM1_BASE + 0x48c)
 #define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i)    (AR_SM1_BASE + 0x450 + ((_i) << 2))
 
-/* SM 1 AIC Registers */
-
-#define AR_PHY_AIC_CTRL_0_B1   (AR_SM1_BASE + 0x4b0)
-#define AR_PHY_AIC_CTRL_1_B1   (AR_SM1_BASE + 0x4b4)
-#define AR_PHY_AIC_CTRL_2_B1   (AR_SM1_BASE + 0x4b8)
-#define AR_PHY_AIC_STAT_0_B1   (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
-                                       0x4c0 : 0x4c4))
-#define AR_PHY_AIC_STAT_1_B1   (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
-                                       0x4c4 : 0x4c8))
-#define AR_PHY_AIC_CTRL_4_B1   (AR_SM1_BASE + 0x4c0)
-#define AR_PHY_AIC_STAT_2_B1   (AR_SM1_BASE + 0x4cc)
-
-#define AR_PHY_AIC_SRAM_ADDR_B1        (AR_SM1_BASE + 0x5f0)
-#define AR_PHY_AIC_SRAM_DATA_B1        (AR_SM1_BASE + 0x5f4)
-
 #define AR_PHY_RTT_TABLE_SW_INTF_B(i)  (0x384 + ((i) ? \
                                        AR_SM1_BASE : AR_SM_BASE))
 #define AR_PHY_RTT_TABLE_SW_INTF_1_B(i)        (0x388 + ((i) ? \
index 934418872e8e156a641145a512280f3bee6f86bd..e4d11fa7fe8ce8d6d9a8191928cde773210dec2a 100644 (file)
@@ -106,7 +106,7 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
        int chain, i;
 
        for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
-               if (!(ah->rxchainmask & (1 << chain)))
+               if (!(ah->caps.rx_chainmask & (1 << chain)))
                        continue;
                for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
                        ar9003_hw_rtt_load_hist_entry(ah, chain, i,
@@ -171,7 +171,7 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
        int chain, i;
 
        for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
-               if (!(ah->rxchainmask & (1 << chain)))
+               if (!(ah->caps.rx_chainmask & (1 << chain)))
                        continue;
                for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
                        ah->caldata->rtt_table[chain][i] =
@@ -193,7 +193,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
        int chain, i;
 
        for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
-               if (!(ah->rxchainmask & (1 << chain)))
+               if (!(ah->caps.rx_chainmask & (1 << chain)))
                        continue;
                for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
                        ar9003_hw_rtt_load_hist_entry(ah, chain, i, 0);
index 86bfc9604dcabec92e8ba4703f1bdc3637ad30f5..bea41df9fbd7407575f79349ac7d4b68ed72d955 100644 (file)
 #include "reg_wow.h"
 #include "hw-ops.h"
 
+static void ath9k_hw_set_sta_powersave(struct ath_hw *ah)
+{
+       if (!ath9k_hw_mci_is_enabled(ah))
+               goto set;
+       /*
+        * If MCI is being used, set PWR_SAV only when MCI's
+        * PS state is disabled.
+        */
+       if (ar9003_mci_state(ah, MCI_STATE_GET_WLAN_PS_STATE) != MCI_PS_DISABLE)
+               return;
+set:
+       REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+}
+
 static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
 
-       REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+       ath9k_hw_set_sta_powersave(ah);
 
        /* set rx disable bit */
        REG_WRITE(ah, AR_CR, AR_CR_RXD);
@@ -44,6 +58,9 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
                        REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
        }
 
+       if (ath9k_hw_mci_is_enabled(ah))
+               REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
        REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
 }
 
@@ -74,8 +91,6 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
        for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
 
-       REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
-
        data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
                       (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
        data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
@@ -88,9 +103,11 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
                       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
        data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
 
-       if (AR_SREV_9462_20(ah)) {
-               /* AR9462 2.0 has an extra descriptor word (time based
-                * discard) compared to other chips */
+       if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
+               /*
+                * AR9462 2.0 and AR9565 have an extra descriptor word
+                * (time based discard) compared to other chips.
+                */
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
                wow_ka_data_word0 = AR_WOW_TXBUF(13);
        } else {
@@ -99,7 +116,6 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
 
        for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
                REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
-
 }
 
 int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
@@ -170,18 +186,17 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
        u32 val = 0, rval;
 
        /*
-        * read the WoW status register to know
-        * the wakeup reason
+        * Read the WoW status register to know
+        * the wakeup reason.
         */
        rval = REG_READ(ah, AR_WOW_PATTERN);
        val = AR_WOW_STATUS(rval);
 
        /*
-        * mask only the WoW events that we have enabled. Sometimes
+        * Mask only the WoW events that we have enabled. Sometimes
         * we have spurious WoW events from the AR_WOW_PATTERN
         * register. This mask will clean it up.
         */
-
        val &= ah->wow.wow_event_mask;
 
        if (val) {
@@ -195,6 +210,15 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
                        wow_status |= AH_WOW_BEACON_MISS;
        }
 
+       rval = REG_READ(ah, AR_MAC_PCU_WOW4);
+       val = AR_WOW_STATUS2(rval);
+       val &= ah->wow.wow_event_mask2;
+
+       if (val) {
+               if (AR_WOW2_PATTERN_FOUND(val))
+                       wow_status |= AH_WOW_USER_PATTERN_EN;
+       }
+
        /*
         * set and clear WOW_PME_CLEAR registers for the chip to
         * generate next wow signal.
@@ -206,10 +230,12 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
                AR_PMCTRL_PWR_STATE_D1D3);
 
        /*
-        * clear all events
+        * Clear all events.
         */
        REG_WRITE(ah, AR_WOW_PATTERN,
                  AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+       REG_WRITE(ah, AR_MAC_PCU_WOW4,
+                 AR_WOW_CLEAR_EVENTS2(REG_READ(ah, AR_MAC_PCU_WOW4)));
 
        /*
         * restore the beacon threshold to init value
@@ -226,7 +252,15 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
        if (ah->is_pciexpress)
                ath9k_hw_configpcipowersave(ah, false);
 
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah) || AR_SREV_9485(ah)) {
+               u32 dc = REG_READ(ah, AR_DIRECT_CONNECT);
+
+               if (!(dc & AR_DC_TSF2_ENABLE))
+                       ath9k_hw_gen_timer_start_tsf2(ah);
+       }
+
        ah->wow.wow_event_mask = 0;
+       ah->wow.wow_event_mask2 = 0;
 
        return wow_status;
 }
@@ -408,6 +442,9 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
 
        ath9k_hw_wow_set_arwr_reg(ah);
 
+       if (ath9k_hw_mci_is_enabled(ah))
+               REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
        /* HW WoW */
        REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5));
 
index 0f8e9464e4ab36963ef015ec19c0a4537d8a668f..a7a81b3969cec7e79b2cb73959c4d8ff1fb7489e 100644 (file)
@@ -184,12 +184,12 @@ struct ath_frame_info {
        struct ath_buf *bf;
        u16 framelen;
        s8 txq;
-       enum ath9k_key_type keytype;
        u8 keyix;
        u8 rtscts_rate;
        u8 retries : 7;
        u8 baw_tracked : 1;
        u8 tx_power;
+       enum ath9k_key_type keytype:2;
 };
 
 struct ath_rxbuf {
@@ -645,6 +645,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc,
                               struct ath9k_vif_iter_data *iter_data);
 void ath9k_calculate_summary_state(struct ath_softc *sc,
                                   struct ath_chanctx *ctx);
+void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif);
 
 /*******************/
 /* Beacon Handling */
index 3dfc2c7f1f07862ce81238aa297b4a45cb944fd8..5a084d94ed90793f22d5964bee6114a63a26f605 100644 (file)
@@ -103,7 +103,9 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
                return;
        }
 
-       if (AR_SREV_9300_20_OR_LATER(ah)) {
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+               btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+       } else if (AR_SREV_9300_20_OR_LATER(ah)) {
                btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
                btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
                btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -307,6 +309,18 @@ static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
        btcoex->enabled = true;
 }
 
+static void ath9k_hw_btcoex_disable_mci(struct ath_hw *ah)
+{
+       struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+       int i;
+
+       ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
+
+       for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+               REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+                         btcoex_hw->wlan_weight[i]);
+}
+
 void ath9k_hw_btcoex_enable(struct ath_hw *ah)
 {
        struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
@@ -318,17 +332,18 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
                ath9k_hw_btcoex_enable_2wire(ah);
                break;
        case ATH_BTCOEX_CFG_3WIRE:
-               if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
-                       ath9k_hw_btcoex_enable_mci(ah);
-                       return;
-               }
                ath9k_hw_btcoex_enable_3wire(ah);
                break;
+       case ATH_BTCOEX_CFG_MCI:
+               ath9k_hw_btcoex_enable_mci(ah);
+               break;
        }
 
-       REG_RMW(ah, AR_GPIO_PDPU,
-               (0x2 << (btcoex_hw->btactive_gpio * 2)),
-               (0x3 << (btcoex_hw->btactive_gpio * 2)));
+       if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) {
+               REG_RMW(ah, AR_GPIO_PDPU,
+                       (0x2 << (btcoex_hw->btactive_gpio * 2)),
+                       (0x3 << (btcoex_hw->btactive_gpio * 2)));
+       }
 
        ah->btcoex_hw.enabled = true;
 }
@@ -340,14 +355,14 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
        int i;
 
        btcoex_hw->enabled = false;
-       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
-               ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
-               for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
-                       REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
-                                 btcoex_hw->wlan_weight[i]);
+
+       if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_MCI) {
+               ath9k_hw_btcoex_disable_mci(ah);
                return;
        }
-       ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
+
+       if (!AR_SREV_9300_20_OR_LATER(ah))
+               ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
 
        ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
                        AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
index 6de26ea5d5fa11e190d89edd92dcec24058e1f75..cd2f0a2373cb92f7eeb47f0fa0ab21e5a78c3a9e 100644 (file)
@@ -44,6 +44,9 @@
 
 #define AR9300_NUM_BT_WEIGHTS   4
 #define AR9300_NUM_WLAN_WEIGHTS 4
+
+#define ATH_AIC_MAX_BT_CHANNEL  79
+
 /* Defines the BT AR_BT_COEX_WGHT used */
 enum ath_stomp_type {
        ATH_BTCOEX_STOMP_ALL,
@@ -58,6 +61,7 @@ enum ath_btcoex_scheme {
        ATH_BTCOEX_CFG_NONE,
        ATH_BTCOEX_CFG_2WIRE,
        ATH_BTCOEX_CFG_3WIRE,
+       ATH_BTCOEX_CFG_MCI,
 };
 
 struct ath9k_hw_mci {
@@ -92,9 +96,18 @@ struct ath9k_hw_mci {
        u32 last_recovery;
 };
 
+struct ath9k_hw_aic {
+       bool aic_enabled;
+       u8 aic_cal_state;
+       u8 aic_caled_chan;
+       u32 aic_sram[ATH_AIC_MAX_BT_CHANNEL];
+       u32 aic_cal_start_time;
+};
+
 struct ath_btcoex_hw {
        enum ath_btcoex_scheme scheme;
        struct ath9k_hw_mci mci;
+       struct ath9k_hw_aic aic;
        bool enabled;
        u8 wlanactive_gpio;
        u8 btactive_gpio;
index e200a6e3aca5f4e4ce98354814358724cb0444d3..3e2e24e4843fdbf94ef4d4484a4f05ef1f3bb21a 100644 (file)
@@ -238,7 +238,6 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
 {
        struct ath9k_nfcal_hist *h = NULL;
        unsigned i, j;
-       int32_t val;
        u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
        struct ath_common *common = ath9k_hw_common(ah);
        s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
@@ -246,6 +245,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
        if (ah->caldata)
                h = ah->caldata->nfCalHist;
 
+       ENABLE_REG_RMW_BUFFER(ah);
        for (i = 0; i < NUM_NF_READINGS; i++) {
                if (chainmask & (1 << i)) {
                        s16 nfval;
@@ -258,10 +258,8 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
                        else
                                nfval = default_nf;
 
-                       val = REG_READ(ah, ah->nf_regs[i]);
-                       val &= 0xFFFFFE00;
-                       val |= (((u32) nfval << 1) & 0x1ff);
-                       REG_WRITE(ah, ah->nf_regs[i], val);
+                       REG_RMW(ah, ah->nf_regs[i],
+                               (((u32) nfval << 1) & 0x1ff), 0x1ff);
                }
        }
 
@@ -274,6 +272,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
        REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
                    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
        REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+       REG_RMW_BUFFER_FLUSH(ah);
 
        /*
         * Wait for load to complete, should be fast, a few 10s of us.
@@ -309,19 +308,17 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
         * by the median we just loaded.  This will be initial (and max) value
         * of next noise floor calibration the baseband does.
         */
-       ENABLE_REGWRITE_BUFFER(ah);
+       ENABLE_REG_RMW_BUFFER(ah);
        for (i = 0; i < NUM_NF_READINGS; i++) {
                if (chainmask & (1 << i)) {
                        if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
                                continue;
 
-                       val = REG_READ(ah, ah->nf_regs[i]);
-                       val &= 0xFFFFFE00;
-                       val |= (((u32) (-50) << 1) & 0x1ff);
-                       REG_WRITE(ah, ah->nf_regs[i], val);
+                       REG_RMW(ah, ah->nf_regs[i],
+                                       (((u32) (-50) << 1) & 0x1ff), 0x1ff);
                }
        }
-       REGWRITE_BUFFER_FLUSH(ah);
+       REG_RMW_BUFFER_FLUSH(ah);
 
        return 0;
 }
index 50a2e0ac3b8b4c5a8f5653e6dd59b15318fb56df..dbf8f495964217e1b5799fb165155ff3c88b4894 100644 (file)
@@ -1156,7 +1156,10 @@ static ssize_t write_file_tpc(struct file *file, const char __user *user_buf,
 
        if (tpc_enabled != ah->tpc_enabled) {
                ah->tpc_enabled = tpc_enabled;
-               ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+
+               mutex_lock(&sc->mutex);
+               ath9k_set_txpower(sc, NULL);
+               mutex_unlock(&sc->mutex);
        }
 
        return count;
index 726271c7c3306e8a9255fd645bcbe0d3c6d8fb3e..e98a9eaba7ff3f1b84a85945e63e901c8a216207 100644 (file)
@@ -126,8 +126,19 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
        DFS_STAT_INC(sc, pulses_detected);
        return true;
 }
-#undef PRI_CH_RADAR_FOUND
-#undef EXT_CH_RADAR_FOUND
+
+static void
+ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe)
+{
+       struct dfs_pattern_detector *pd = sc->dfs_detector;
+       DFS_STAT_INC(sc, pulses_processed);
+       if (pd == NULL)
+               return;
+       if (!pd->add_pulse(pd, pe))
+               return;
+       DFS_STAT_INC(sc, radar_detected);
+       ieee80211_radar_detected(sc->hw);
+}
 
 /*
  * DFS: check PHY-error for radar pulse and feed the detector
@@ -176,18 +187,21 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
        ard.pulse_length_pri = vdata_end[-3];
        pe.freq = ah->curchan->channel;
        pe.ts = mactime;
-       if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
-               struct dfs_pattern_detector *pd = sc->dfs_detector;
-               ath_dbg(common, DFS,
-                       "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
-                       "width=%d, rssi=%d, delta_ts=%llu\n",
-                       pe.freq, pe.ts, pe.width, pe.rssi,
-                       pe.ts - sc->dfs_prev_pulse_ts);
-               sc->dfs_prev_pulse_ts = pe.ts;
-               DFS_STAT_INC(sc, pulses_processed);
-               if (pd != NULL && pd->add_pulse(pd, &pe)) {
-                       DFS_STAT_INC(sc, radar_detected);
-                       ieee80211_radar_detected(sc->hw);
-               }
+       if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
+               return;
+
+       ath_dbg(common, DFS,
+               "ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
+               "width=%d, rssi=%d, delta_ts=%llu\n",
+               ard.pulse_bw_info, pe.freq, pe.ts, pe.width, pe.rssi,
+               pe.ts - sc->dfs_prev_pulse_ts);
+       sc->dfs_prev_pulse_ts = pe.ts;
+       if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
+               ath9k_dfs_process_radar_pulse(sc, &pe);
+       if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
+               pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
+               ath9k_dfs_process_radar_pulse(sc, &pe);
        }
 }
+#undef PRI_CH_RADAR_FOUND
+#undef EXT_CH_RADAR_FOUND
index 971d770722cf239bde42cdfedc5fe0fa6c52c7dc..cc81482c934d61e6a40c3b181435348040c93cd2 100644 (file)
@@ -27,12 +27,7 @@ void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
 void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
                               u32 shift, u32 val)
 {
-       u32 regVal;
-
-       regVal = REG_READ(ah, reg) & ~mask;
-       regVal |= (val << shift) & mask;
-
-       REG_WRITE(ah, reg, regVal);
+       REG_RMW(ah, reg, ((val << shift) & mask), mask);
 
        if (ah->config.analog_shiftreg)
                udelay(100);
index e5a78d4fd66e570765a0ac5fd020be9115894009..4773da6dc6f2d7d96b8e8a51e7ca252737cfe88b 100644 (file)
@@ -389,6 +389,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
                }
        }
 
+       ENABLE_REG_RMW_BUFFER(ah);
        REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
                      (numXpdGain - 1) & 0x3);
        REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
@@ -396,6 +397,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
        REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
                      xpdGainValues[1]);
        REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0);
+       REG_RMW_BUFFER_FLUSH(ah);
 
        for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
                regChainOffset = i * 0x1000;
@@ -770,15 +772,14 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
                                 struct ar5416_eeprom_4k *eep,
                                 u8 txRxAttenLocal)
 {
-       REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0,
-                 pModal->antCtrlChain[0]);
+       ENABLE_REG_RMW_BUFFER(ah);
+       REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0,
+               pModal->antCtrlChain[0], 0);
 
-       REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0),
-                 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
-                  ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
-                    AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
-                 SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
-                 SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
+       REG_RMW(ah, AR_PHY_TIMING_CTRL4(0),
+               SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
+               SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF),
+               AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF);
 
        if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
            AR5416_EEP_MINOR_VER_3) {
@@ -817,6 +818,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
                      AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
        REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000,
                      AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
+       REG_RMW_BUFFER_FLUSH(ah);
 }
 
 /*
@@ -928,6 +930,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
                }
        }
 
+       ENABLE_REG_RMW_BUFFER(ah);
        if (AR_SREV_9271(ah)) {
                ath9k_hw_analog_shift_rmw(ah,
                                          AR9285_AN_RF2G3,
@@ -1032,18 +1035,19 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
                                          AR9285_AN_RF2G4_DB2_4_S,
                                          db2[4]);
        }
+       REG_RMW_BUFFER_FLUSH(ah);
 
-
+       ENABLE_REG_RMW_BUFFER(ah);
        REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
                      pModal->switchSettling);
        REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
                      pModal->adcDesiredSize);
 
-       REG_WRITE(ah, AR_PHY_RF_CTL4,
-                 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
-                 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
-                 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON)  |
-                 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON));
+       REG_RMW(ah, AR_PHY_RF_CTL4,
+               SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
+               SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
+               SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON)  |
+               SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON), 0);
 
        REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
                      pModal->txEndToRxOn);
@@ -1072,6 +1076,8 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
                                      pModal->swSettleHt40);
        }
 
+       REG_RMW_BUFFER_FLUSH(ah);
+
        bb_desired_scale = (pModal->bb_scale_smrt_antenna &
                        EEP_4K_BB_DESIRED_SCALE_MASK);
        if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) {
@@ -1080,6 +1086,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
                mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25);
                pwrctrl = mask * bb_desired_scale;
                clr = mask * 0x1f;
+               ENABLE_REG_RMW_BUFFER(ah);
                REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr);
                REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr);
                REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr);
@@ -1094,6 +1101,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
                clr = mask * 0x1f;
                REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr);
                REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr);
+               REG_RMW_BUFFER_FLUSH(ah);
        }
 }
 
index 098059039351fb065cafcb252a5c91b1e071d43b..056f516bf017629e4be0ced579140acca5355868 100644 (file)
@@ -466,6 +466,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
                                  struct ar5416_eeprom_def *eep,
                                  u8 txRxAttenLocal, int regChainOffset, int i)
 {
+       ENABLE_REG_RMW_BUFFER(ah);
        if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
                txRxAttenLocal = pModal->txRxAttenCh[i];
 
@@ -483,16 +484,12 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
                              AR_PHY_GAIN_2GHZ_XATTEN2_DB,
                              pModal->xatten2Db[i]);
                } else {
-                       REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
-                         (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
-                          ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
-                         | SM(pModal-> bswMargin[i],
-                              AR_PHY_GAIN_2GHZ_BSW_MARGIN));
-                       REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
-                         (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
-                          ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
-                         | SM(pModal->bswAtten[i],
-                              AR_PHY_GAIN_2GHZ_BSW_ATTEN));
+                       REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+                               SM(pModal-> bswMargin[i], AR_PHY_GAIN_2GHZ_BSW_MARGIN),
+                               AR_PHY_GAIN_2GHZ_BSW_MARGIN);
+                       REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+                               SM(pModal->bswAtten[i], AR_PHY_GAIN_2GHZ_BSW_ATTEN),
+                               AR_PHY_GAIN_2GHZ_BSW_ATTEN);
                }
        }
 
@@ -504,17 +501,14 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
                      AR_PHY_RXGAIN + regChainOffset,
                      AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]);
        } else {
-               REG_WRITE(ah,
-                         AR_PHY_RXGAIN + regChainOffset,
-                         (REG_READ(ah, AR_PHY_RXGAIN + regChainOffset) &
-                          ~AR_PHY_RXGAIN_TXRX_ATTEN)
-                         | SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN));
-               REG_WRITE(ah,
-                         AR_PHY_GAIN_2GHZ + regChainOffset,
-                         (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
-                          ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
-                         SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
+               REG_RMW(ah, AR_PHY_RXGAIN + regChainOffset,
+                       SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN),
+                       AR_PHY_RXGAIN_TXRX_ATTEN);
+               REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+                       SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN),
+                       AR_PHY_GAIN_2GHZ_RXTX_MARGIN);
        }
+       REG_RMW_BUFFER_FLUSH(ah);
 }
 
 static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
index da344b27326c9f036e0ebef1ac9d48c5179a7134..86d46c196966f0e65b3c963011b124318f592f8b 100644 (file)
@@ -202,17 +202,16 @@ static void ath_btcoex_period_timer(unsigned long data)
        }
        spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 
-       ath9k_mci_update_rssi(sc);
-
        ath9k_ps_wakeup(sc);
+       spin_lock_bh(&btcoex->btcoex_lock);
 
-       if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
-               ath_detect_bt_priority(sc);
-
-       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+               ath9k_mci_update_rssi(sc);
                ath_mci_ftp_adjust(sc);
+       }
 
-       spin_lock_bh(&btcoex->btcoex_lock);
+       if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+               ath_detect_bt_priority(sc);
 
        stomp_type = btcoex->bt_stomp_type;
        timer_period = btcoex->btcoex_no_stomp;
@@ -252,9 +251,6 @@ static void ath_btcoex_no_stomp_timer(unsigned long arg)
        struct ath_softc *sc = (struct ath_softc *)arg;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_btcoex *btcoex = &sc->btcoex;
-       struct ath_common *common = ath9k_hw_common(ah);
-
-       ath_dbg(common, BTCOEX, "no stomp timer running\n");
 
        ath9k_ps_wakeup(sc);
        spin_lock_bh(&btcoex->btcoex_lock);
@@ -271,7 +267,7 @@ static void ath_btcoex_no_stomp_timer(unsigned long arg)
        ath9k_ps_restore(sc);
 }
 
-static int ath_init_btcoex_timer(struct ath_softc *sc)
+static void ath_init_btcoex_timer(struct ath_softc *sc)
 {
        struct ath_btcoex *btcoex = &sc->btcoex;
 
@@ -280,6 +276,7 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
                btcoex->btcoex_period / 100;
        btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
                                   btcoex->btcoex_period / 100;
+       btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
 
        setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
                        (unsigned long) sc);
@@ -287,8 +284,6 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
                        (unsigned long) sc);
 
        spin_lock_init(&btcoex->btcoex_lock);
-
-       return 0;
 }
 
 /*
@@ -299,6 +294,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
        struct ath_btcoex *btcoex = &sc->btcoex;
        struct ath_hw *ah = sc->sc_ah;
 
+       if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE &&
+           ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI)
+               return;
+
        ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
 
        /* make sure duty cycle timer is also stopped when resuming */
@@ -312,13 +311,19 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
        mod_timer(&btcoex->period_timer, jiffies);
 }
 
-
 /*
  * Pause btcoex timer and bt duty cycle timer
  */
 void ath9k_btcoex_timer_pause(struct ath_softc *sc)
 {
        struct ath_btcoex *btcoex = &sc->btcoex;
+       struct ath_hw *ah = sc->sc_ah;
+
+       if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE &&
+           ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI)
+               return;
+
+       ath_dbg(ath9k_hw_common(ah), BTCOEX, "Stopping btcoex timers\n");
 
        del_timer_sync(&btcoex->period_timer);
        del_timer_sync(&btcoex->no_stomp_timer);
@@ -356,33 +361,33 @@ void ath9k_start_btcoex(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
 
-       if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
-           !ah->btcoex_hw.enabled) {
-               if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
-                       ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                                                  AR_STOMP_LOW_WLAN_WGHT, 0);
-               else
-                       ath9k_hw_btcoex_set_weight(ah, 0, 0,
-                                                  ATH_BTCOEX_STOMP_NONE);
-               ath9k_hw_btcoex_enable(ah);
+       if (ah->btcoex_hw.enabled ||
+           ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+               return;
 
-               if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
-                       ath9k_btcoex_timer_resume(sc);
-       }
+       if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+               ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+                                          AR_STOMP_LOW_WLAN_WGHT, 0);
+       else
+               ath9k_hw_btcoex_set_weight(ah, 0, 0,
+                                          ATH_BTCOEX_STOMP_NONE);
+       ath9k_hw_btcoex_enable(ah);
+       ath9k_btcoex_timer_resume(sc);
 }
 
 void ath9k_stop_btcoex(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
 
-       if (ah->btcoex_hw.enabled &&
-           ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
-               if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
-                       ath9k_btcoex_timer_pause(sc);
-               ath9k_hw_btcoex_disable(ah);
-               if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
-                       ath_mci_flush_profile(&sc->btcoex.mci);
-       }
+       if (!ah->btcoex_hw.enabled ||
+           ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+               return;
+
+       ath9k_btcoex_timer_pause(sc);
+       ath9k_hw_btcoex_disable(ah);
+
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+               ath_mci_flush_profile(&sc->btcoex.mci);
 }
 
 void ath9k_deinit_btcoex(struct ath_softc *sc)
@@ -409,22 +414,20 @@ int ath9k_init_btcoex(struct ath_softc *sc)
                break;
        case ATH_BTCOEX_CFG_3WIRE:
                ath9k_hw_btcoex_init_3wire(sc->sc_ah);
-               r = ath_init_btcoex_timer(sc);
-               if (r)
-                       return -1;
+               ath_init_btcoex_timer(sc);
                txq = sc->tx.txq_map[IEEE80211_AC_BE];
                ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
-               sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
-               if (ath9k_hw_mci_is_enabled(ah)) {
-                       sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
-                       INIT_LIST_HEAD(&sc->btcoex.mci.info);
+               break;
+       case ATH_BTCOEX_CFG_MCI:
+               ath_init_btcoex_timer(sc);
 
-                       r = ath_mci_setup(sc);
-                       if (r)
-                               return r;
+               sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+               INIT_LIST_HEAD(&sc->btcoex.mci.info);
+               ath9k_hw_btcoex_init_mci(ah);
 
-                       ath9k_hw_btcoex_init_mci(ah);
-               }
+               r = ath_mci_setup(sc);
+               if (r)
+                       return r;
 
                break;
        default:
index 8e7153b186ede94c4409fb422da8e7d4c58e9b0c..10c02f5cbc5eb8ec45abbbbab4c11ad973aabbc1 100644 (file)
@@ -40,6 +40,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
        { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
        { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
        { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
+       { USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
 
        { USB_DEVICE(0x0cf3, 0x7015),
          .driver_info = AR9287_USB },  /* Atheros */
index 300d3671d0ef820dc99cb5c202cd7945437a13b6..e82a0d4ce23f99247ea540be2a00dd4c26f9bd90 100644 (file)
@@ -444,6 +444,10 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
 #define OP_BT_SCAN                 BIT(4)
 #define OP_TSF_RESET               BIT(6)
 
+enum htc_op_flags {
+       HTC_FWFLAG_NO_RMW,
+};
+
 struct ath9k_htc_priv {
        struct device *dev;
        struct ieee80211_hw *hw;
@@ -482,6 +486,7 @@ struct ath9k_htc_priv {
        bool reconfig_beacon;
        unsigned int rxfilter;
        unsigned long op_flags;
+       unsigned long fw_flags;
 
        struct ath9k_hw_cal_data caldata;
        struct ath_spec_scan_priv spec_priv;
index fd229409f6762249061f1215a5c4e635de3ec9ae..d7beefe60683df8bd22b134e6a4418d9f19e5bbd 100644 (file)
@@ -376,17 +376,139 @@ static void ath9k_regwrite_flush(void *hw_priv)
        mutex_unlock(&priv->wmi->multi_write_mutex);
 }
 
-static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
+static void ath9k_reg_rmw_buffer(void *hw_priv,
+                                u32 reg_offset, u32 set, u32 clr)
+{
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+       u32 rsp_status;
+       int r;
+
+       mutex_lock(&priv->wmi->multi_rmw_mutex);
+
+       /* Store the register/value */
+       priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].reg =
+               cpu_to_be32(reg_offset);
+       priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].set =
+               cpu_to_be32(set);
+       priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].clr =
+               cpu_to_be32(clr);
+
+       priv->wmi->multi_rmw_idx++;
+
+       /* If the buffer is full, send it out. */
+       if (priv->wmi->multi_rmw_idx == MAX_RMW_CMD_NUMBER) {
+               r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+                         (u8 *) &priv->wmi->multi_rmw,
+                         sizeof(struct register_write) * priv->wmi->multi_rmw_idx,
+                         (u8 *) &rsp_status, sizeof(rsp_status),
+                         100);
+               if (unlikely(r)) {
+                       ath_dbg(common, WMI,
+                               "REGISTER RMW FAILED, multi len: %d\n",
+                               priv->wmi->multi_rmw_idx);
+               }
+               priv->wmi->multi_rmw_idx = 0;
+       }
+
+       mutex_unlock(&priv->wmi->multi_rmw_mutex);
+}
+
+static void ath9k_reg_rmw_flush(void *hw_priv)
 {
-       u32 val;
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+       u32 rsp_status;
+       int r;
+
+       if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
+               return;
+
+       atomic_dec(&priv->wmi->m_rmw_cnt);
 
-       val = ath9k_regread(hw_priv, reg_offset);
-       val &= ~clr;
-       val |= set;
-       ath9k_regwrite(hw_priv, val, reg_offset);
+       mutex_lock(&priv->wmi->multi_rmw_mutex);
+
+       if (priv->wmi->multi_rmw_idx) {
+               r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+                         (u8 *) &priv->wmi->multi_rmw,
+                         sizeof(struct register_rmw) * priv->wmi->multi_rmw_idx,
+                         (u8 *) &rsp_status, sizeof(rsp_status),
+                         100);
+               if (unlikely(r)) {
+                       ath_dbg(common, WMI,
+                               "REGISTER RMW FAILED, multi len: %d\n",
+                               priv->wmi->multi_rmw_idx);
+               }
+               priv->wmi->multi_rmw_idx = 0;
+       }
+
+       mutex_unlock(&priv->wmi->multi_rmw_mutex);
+}
+
+static void ath9k_enable_rmw_buffer(void *hw_priv)
+{
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+       if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
+               return;
+
+       atomic_inc(&priv->wmi->m_rmw_cnt);
+}
+
+static u32 ath9k_reg_rmw_single(void *hw_priv,
+                                u32 reg_offset, u32 set, u32 clr)
+{
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+       struct register_rmw buf, buf_ret;
+       int ret;
+       u32 val = 0;
+
+       buf.reg = cpu_to_be32(reg_offset);
+       buf.set = cpu_to_be32(set);
+       buf.clr = cpu_to_be32(clr);
+
+       ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+                         (u8 *) &buf, sizeof(buf),
+                         (u8 *) &buf_ret, sizeof(buf_ret),
+                         100);
+       if (unlikely(ret)) {
+               ath_dbg(common, WMI, "REGISTER RMW FAILED:(0x%04x, %d)\n",
+                       reg_offset, ret);
+       }
        return val;
 }
 
+static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
+{
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+       if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags)) {
+               u32 val;
+
+               val = REG_READ(ah, reg_offset);
+               val &= ~clr;
+               val |= set;
+               REG_WRITE(ah, reg_offset, val);
+
+               return 0;
+       }
+
+       if (atomic_read(&priv->wmi->m_rmw_cnt))
+               ath9k_reg_rmw_buffer(hw_priv, reg_offset, set, clr);
+       else
+               ath9k_reg_rmw_single(hw_priv, reg_offset, set, clr);
+
+       return 0;
+}
+
 static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
 {
        *csz = L1_CACHE_BYTES >> 2;
@@ -501,6 +623,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
        ah->reg_ops.write = ath9k_regwrite;
        ah->reg_ops.enable_write_buffer = ath9k_enable_regwrite_buffer;
        ah->reg_ops.write_flush = ath9k_regwrite_flush;
+       ah->reg_ops.enable_rmw_buffer = ath9k_enable_rmw_buffer;
+       ah->reg_ops.rmw_flush = ath9k_reg_rmw_flush;
        ah->reg_ops.rmw = ath9k_reg_rmw;
        priv->ah = ah;
 
@@ -686,6 +810,12 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
                return -EINVAL;
        }
 
+       if (priv->fw_version_major == 1 && priv->fw_version_minor < 4)
+               set_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags);
+
+       dev_info(priv->dev, "FW RMW support: %s\n",
+               test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags) ? "Off" : "On");
+
        return 0;
 }
 
index 92d5a6c5a2253b6fbc54e2ad4cb79d15b7b0e45b..564923c0df87cdad5226da74a0d9e575cc66bfd4 100644 (file)
@@ -149,7 +149,7 @@ static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
         * when matching addresses.
         */
        iter_data.hw_macaddr = NULL;
-       memset(&iter_data.mask, 0xff, ETH_ALEN);
+       eth_broadcast_addr(iter_data.mask);
 
        if (vif)
                ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
index 88769b64b20b29d9f9cb71c6ffbca27cc3d314d4..232339b0554020f0875b8bc4bb9d255b66413e61 100644 (file)
@@ -108,6 +108,14 @@ static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
                ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
 }
 
+static inline bool ath9k_hw_is_aic_enabled(struct ath_hw *ah)
+{
+       if (ath9k_hw_private_ops(ah)->is_aic_enabled)
+               return ath9k_hw_private_ops(ah)->is_aic_enabled(ah);
+
+       return false;
+}
+
 #endif
 
 /* Private hardware call ops */
index 8529014e1a5e1c1b4637abc1625fd177b407f602..5cdbdb0383710d024397af69abe35738c05820ea 100644 (file)
@@ -121,6 +121,36 @@ void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
        REGWRITE_BUFFER_FLUSH(ah);
 }
 
+void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size)
+{
+       u32 *tmp_reg_list, *tmp_data;
+       int i;
+
+       tmp_reg_list = kmalloc(size * sizeof(u32), GFP_KERNEL);
+       if (!tmp_reg_list) {
+               dev_err(ah->dev, "%s: tmp_reg_list: alloc filed\n", __func__);
+               return;
+       }
+
+       tmp_data = kmalloc(size * sizeof(u32), GFP_KERNEL);
+       if (!tmp_data) {
+               dev_err(ah->dev, "%s tmp_data: alloc filed\n", __func__);
+               goto error_tmp_data;
+       }
+
+       for (i = 0; i < size; i++)
+               tmp_reg_list[i] = array[i][0];
+
+       REG_READ_MULTI(ah, tmp_reg_list, tmp_data, size);
+
+       for (i = 0; i < size; i++)
+               array[i][1] = tmp_data[i];
+
+       kfree(tmp_data);
+error_tmp_data:
+       kfree(tmp_reg_list);
+}
+
 u32 ath9k_hw_reverse_bits(u32 val, u32 n)
 {
        u32 retval;
@@ -366,6 +396,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
                ah->config.rimt_first = 700;
        }
 
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+               ah->config.pll_pwrsave = 7;
+
        /*
         * We need this for PCI devices only (Cardbus, PCI, miniPCI)
         * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
@@ -1197,6 +1230,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
        u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
        u32 set = AR_STA_ID1_KSRCH_MODE;
 
+       ENABLE_REG_RMW_BUFFER(ah);
        switch (opmode) {
        case NL80211_IFTYPE_ADHOC:
                if (!AR_SREV_9340_13(ah)) {
@@ -1218,6 +1252,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
                break;
        }
        REG_RMW(ah, AR_STA_ID1, set, mask);
+       REG_RMW_BUFFER_FLUSH(ah);
 }
 
 void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
@@ -1930,6 +1965,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        if (!ath9k_hw_mci_is_enabled(ah))
                REG_WRITE(ah, AR_OBS, 8);
 
+       ENABLE_REG_RMW_BUFFER(ah);
        if (ah->config.rx_intr_mitigation) {
                REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, ah->config.rimt_last);
                REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, ah->config.rimt_first);
@@ -1939,6 +1975,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
                REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
        }
+       REG_RMW_BUFFER_FLUSH(ah);
 
        ath9k_hw_init_bb(ah, chan);
 
index e82e570de330386c31e76f010c3b6a5e65b79b37..92fab1a54697e6ed77cea915c205aeae293363b6 100644 (file)
@@ -27,6 +27,7 @@
 #include "eeprom.h"
 #include "calib.h"
 #include "reg.h"
+#include "reg_mci.h"
 #include "phy.h"
 #include "btcoex.h"
 #include "dynack.h"
                        (_ah)->reg_ops.write_flush((_ah));      \
        } while (0)
 
+#define ENABLE_REG_RMW_BUFFER(_ah)                                     \
+       do {                                                            \
+               if ((_ah)->reg_ops.enable_rmw_buffer)   \
+                       (_ah)->reg_ops.enable_rmw_buffer((_ah)); \
+       } while (0)
+
+#define REG_RMW_BUFFER_FLUSH(_ah)                                      \
+       do {                                                            \
+               if ((_ah)->reg_ops.rmw_flush)           \
+                       (_ah)->reg_ops.rmw_flush((_ah));        \
+       } while (0)
+
 #define PR_EEP(_s, _val)                                               \
        do {                                                            \
                len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
 
 #define REG_WRITE_ARRAY(iniarray, column, regWr) \
        ath9k_hw_write_array(ah, iniarray, column, &(regWr))
+#define REG_READ_ARRAY(ah, array, size) \
+       ath9k_hw_read_array(ah, array, size)
 
 #define AR_GPIO_OUTPUT_MUX_AS_OUTPUT             0
 #define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
@@ -308,6 +323,12 @@ enum ath9k_hw_hang_checks {
        HW_MAC_HANG               = BIT(5),
 };
 
+#define AR_PCIE_PLL_PWRSAVE_CONTROL BIT(0)
+#define AR_PCIE_PLL_PWRSAVE_ON_D3   BIT(1)
+#define AR_PCIE_PLL_PWRSAVE_ON_D0   BIT(2)
+#define AR_PCIE_CDR_PWRSAVE_ON_D3   BIT(3)
+#define AR_PCIE_CDR_PWRSAVE_ON_D0   BIT(4)
+
 struct ath9k_ops_config {
        int dma_beacon_response_time;
        int sw_beacon_response_time;
@@ -334,7 +355,7 @@ struct ath9k_ops_config {
        u32 ant_ctrl_comm2g_switch_enable;
        bool xatten_margin_cfg;
        bool alt_mingainidx;
-       bool no_pll_pwrsave;
+       u8 pll_pwrsave;
        bool tx_gain_buffalo;
        bool led_active_high;
 };
@@ -646,6 +667,10 @@ struct ath_hw_private_ops {
 
        /* ANI */
        void (*ani_cache_ini_regs)(struct ath_hw *ah);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+       bool (*is_aic_enabled)(struct ath_hw *ah);
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
 };
 
 /**
@@ -1007,6 +1032,7 @@ void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
 bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
 void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
                          int column, unsigned int *writecnt);
+void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size);
 u32 ath9k_hw_reverse_bits(u32 val, u32 n);
 u16 ath9k_hw_computetxtime(struct ath_hw *ah,
                           u8 phy, int kbps,
@@ -1116,6 +1142,7 @@ void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us);
 void ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
 
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+void ar9003_hw_attach_aic_ops(struct ath_hw *ah);
 static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
 {
        return ah->btcoex_hw.enabled;
@@ -1133,6 +1160,9 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
        return ah->btcoex_hw.scheme;
 }
 #else
+static inline void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
+{
+}
 static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
 {
        return false;
index 6c6e88495394e1897edb4db3768bd6de8aee24e7..f8d11efa7b0f1fd7fef0da1fce74ea4ada23f0ee 100644 (file)
@@ -141,6 +141,16 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
        return val;
 }
 
+static void ath9k_multi_ioread32(void *hw_priv, u32 *addr,
+                                u32 *val, u16 count)
+{
+       int i;
+
+       for (i = 0; i < count; i++)
+               val[i] = ath9k_ioread32(hw_priv, addr[i]);
+}
+
+
 static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
                                    u32 set, u32 clr)
 {
@@ -437,8 +447,15 @@ static void ath9k_init_pcoem_platform(struct ath_softc *sc)
                ath_info(common, "Enable WAR for ASPM D3/L1\n");
        }
 
+       /*
+        * The default value of pll_pwrsave is 1.
+        * For certain AR9485 cards, it is set to 0.
+        * For AR9462, AR9565 it's set to 7.
+        */
+       ah->config.pll_pwrsave = 1;
+
        if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) {
-               ah->config.no_pll_pwrsave = true;
+               ah->config.pll_pwrsave = 0;
                ath_info(common, "Disable PLL PowerSave\n");
        }
 
@@ -530,6 +547,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        ah->hw = sc->hw;
        ah->hw_version.devid = devid;
        ah->reg_ops.read = ath9k_ioread32;
+       ah->reg_ops.multi_read = ath9k_multi_ioread32;
        ah->reg_ops.write = ath9k_iowrite32;
        ah->reg_ops.rmw = ath9k_reg_rmw;
        pCap = &ah->caps;
@@ -763,7 +781,8 @@ static const struct ieee80211_iface_combination if_comb[] = {
                .num_different_channels = 1,
                .beacon_int_infra_match = true,
                .radar_detect_widths =  BIT(NL80211_CHAN_WIDTH_20_NOHT) |
-                                       BIT(NL80211_CHAN_WIDTH_20),
+                                       BIT(NL80211_CHAN_WIDTH_20) |
+                                       BIT(NL80211_CHAN_WIDTH_40),
        }
 #endif
 };
index 9ede991b8d767cfd2268a9137dcaa57d171af174..b0badef71ce793e5bc85358e0166208edbb9688b 100644 (file)
@@ -994,7 +994,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc,
         * BSSID mask when matching addresses.
         */
        memset(iter_data, 0, sizeof(*iter_data));
-       memset(&iter_data->mask, 0xff, ETH_ALEN);
+       eth_broadcast_addr(iter_data->mask);
        iter_data->slottime = ATH9K_SLOT_TIME_9;
 
        list_for_each_entry(avp, &ctx->vifs, list)
@@ -1139,7 +1139,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
                        ctx->primary_sta = iter_data.primary_sta;
                } else {
                        ctx->primary_sta = NULL;
-                       memset(common->curbssid, 0, ETH_ALEN);
+                       eth_zero_addr(common->curbssid);
                        common->curaid = 0;
                        ath9k_hw_write_associd(sc->sc_ah);
                        if (ath9k_hw_mci_is_enabled(sc->sc_ah))
@@ -1172,6 +1172,38 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
        ath9k_ps_restore(sc);
 }
 
+static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       int *power = (int *)data;
+
+       if (*power < vif->bss_conf.txpower)
+               *power = vif->bss_conf.txpower;
+}
+
+/* Called with sc->mutex held. */
+void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+       int power;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+
+       ath9k_ps_wakeup(sc);
+       if (ah->tpc_enabled) {
+               power = (vif) ? vif->bss_conf.txpower : -1;
+               ieee80211_iterate_active_interfaces_atomic(
+                               sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+                               ath9k_tpc_vif_iter, &power);
+               if (power == -1)
+                       power = sc->hw->conf.power_level;
+       } else {
+               power = sc->hw->conf.power_level;
+       }
+       sc->cur_chan->txpower = 2 * power;
+       ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+       sc->cur_chan->cur_txpower = reg->max_power_level;
+       ath9k_ps_restore(sc);
+}
+
 static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif)
 {
@@ -1225,6 +1257,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
 
        ath9k_assign_hw_queues(hw, vif);
 
+       ath9k_set_txpower(sc, vif);
+
        an->sc = sc;
        an->sta = NULL;
        an->vif = vif;
@@ -1265,6 +1299,8 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
        ath9k_assign_hw_queues(hw, vif);
        ath9k_calculate_summary_state(sc, avp->chanctx);
 
+       ath9k_set_txpower(sc, vif);
+
        mutex_unlock(&sc->mutex);
        return 0;
 }
@@ -1294,6 +1330,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 
        ath9k_calculate_summary_state(sc, avp->chanctx);
 
+       ath9k_set_txpower(sc, NULL);
+
        mutex_unlock(&sc->mutex);
 }
 
@@ -1397,14 +1435,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_POWER) {
-               ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
-               sc->cur_chan->txpower = 2 * conf->power_level;
-               ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
-                                      sc->cur_chan->txpower,
-                                      &sc->cur_chan->cur_txpower);
-       }
-
        mutex_unlock(&sc->mutex);
        ath9k_ps_restore(sc);
 
@@ -1764,6 +1794,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
        if (changed & CHECK_ANI)
                ath_check_ani(sc);
 
+       if (changed & BSS_CHANGED_TXPOWER) {
+               ath_dbg(common, CONFIG, "vif %pM power %d dbm power_type %d\n",
+                       vif->addr, bss_conf->txpower, bss_conf->txpower_type);
+               ath9k_set_txpower(sc, vif);
+       }
+
        mutex_unlock(&sc->mutex);
        ath9k_ps_restore(sc);
 
index 3f7a11edb82a77dedcfdc5f38fa4c6c832e9579e..66596b95273fe6f98eef7756c8ca55297e35d023 100644 (file)
@@ -495,7 +495,7 @@ void ath_mci_intr(struct ath_softc *sc)
        ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
 
        if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
-               ar9003_mci_get_next_gpm_offset(ah, true, NULL);
+               ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
                return;
        }
 
@@ -559,8 +559,7 @@ void ath_mci_intr(struct ath_softc *sc)
                                return;
 
                        pgpm = mci->gpm_buf.bf_addr;
-                       offset = ar9003_mci_get_next_gpm_offset(ah, false,
-                                                               &more_data);
+                       offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
 
                        if (offset == MCI_GPM_INVALID)
                                break;
index 9587ec655680a281c4ed0338196989f385219900..1234399a43dd78692a52507a78185729a219a329 100644 (file)
@@ -2044,279 +2044,4 @@ enum {
 #define AR_PHY_AGC_CONTROL_YCOK_MAX            0x000003c0
 #define AR_PHY_AGC_CONTROL_YCOK_MAX_S          6
 
-/* MCI Registers */
-
-#define AR_MCI_COMMAND0                                0x1800
-#define AR_MCI_COMMAND0_HEADER                 0xFF
-#define AR_MCI_COMMAND0_HEADER_S               0
-#define AR_MCI_COMMAND0_LEN                    0x1f00
-#define AR_MCI_COMMAND0_LEN_S                  8
-#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP      0x2000
-#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S    13
-
-#define AR_MCI_COMMAND1                                0x1804
-
-#define AR_MCI_COMMAND2                                0x1808
-#define AR_MCI_COMMAND2_RESET_TX               0x01
-#define AR_MCI_COMMAND2_RESET_TX_S             0
-#define AR_MCI_COMMAND2_RESET_RX               0x02
-#define AR_MCI_COMMAND2_RESET_RX_S             1
-#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES     0x3FC
-#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S   2
-#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP        0x400
-#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S      10
-
-#define AR_MCI_RX_CTRL                         0x180c
-
-#define AR_MCI_TX_CTRL                         0x1810
-/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
-#define AR_MCI_TX_CTRL_CLK_DIV                 0x03
-#define AR_MCI_TX_CTRL_CLK_DIV_S               0
-#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE      0x04
-#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S    2
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ                0xFFFFF8
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S      3
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM         0xF000000
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S       24
-
-#define AR_MCI_MSG_ATTRIBUTES_TABLE                    0x1814
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM           0xFFFF
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S         0
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR                0xFFFF0000
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S      16
-
-#define AR_MCI_SCHD_TABLE_0                            0x1818
-#define AR_MCI_SCHD_TABLE_1                            0x181c
-#define AR_MCI_GPM_0                                   0x1820
-#define AR_MCI_GPM_1                                   0x1824
-#define AR_MCI_GPM_WRITE_PTR                           0xFFFF0000
-#define AR_MCI_GPM_WRITE_PTR_S                         16
-#define AR_MCI_GPM_BUF_LEN                             0x0000FFFF
-#define AR_MCI_GPM_BUF_LEN_S                           0
-
-#define AR_MCI_INTERRUPT_RAW                           0x1828
-#define AR_MCI_INTERRUPT_EN                            0x182c
-#define AR_MCI_INTERRUPT_SW_MSG_DONE                   0x00000001
-#define AR_MCI_INTERRUPT_SW_MSG_DONE_S                 0
-#define AR_MCI_INTERRUPT_CPU_INT_MSG                   0x00000002
-#define AR_MCI_INTERRUPT_CPU_INT_MSG_S                 1
-#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL                 0x00000004
-#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S               2
-#define AR_MCI_INTERRUPT_RX_INVALID_HDR                        0x00000008
-#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S              3
-#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL                        0x00000010
-#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S              4
-#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL                        0x00000020
-#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S              5
-#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL                        0x00000080
-#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S              7
-#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL                        0x00000100
-#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S              8
-#define AR_MCI_INTERRUPT_RX_MSG                                0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_S                      9
-#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE           0x00000400
-#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S         10
-#define AR_MCI_INTERRUPT_BT_PRI                                0x07fff800
-#define AR_MCI_INTERRUPT_BT_PRI_S                      11
-#define AR_MCI_INTERRUPT_BT_PRI_THRESH                 0x08000000
-#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S               27
-#define AR_MCI_INTERRUPT_BT_FREQ                       0x10000000
-#define AR_MCI_INTERRUPT_BT_FREQ_S                     28
-#define AR_MCI_INTERRUPT_BT_STOMP                      0x20000000
-#define AR_MCI_INTERRUPT_BT_STOMP_S                    29
-#define AR_MCI_INTERRUPT_BB_AIC_IRQ                    0x40000000
-#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S                  30
-#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT             0x80000000
-#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S           31
-
-#define AR_MCI_INTERRUPT_DEFAULT    (AR_MCI_INTERRUPT_SW_MSG_DONE        | \
-                                    AR_MCI_INTERRUPT_RX_INVALID_HDR      | \
-                                    AR_MCI_INTERRUPT_RX_HW_MSG_FAIL      | \
-                                    AR_MCI_INTERRUPT_RX_SW_MSG_FAIL      | \
-                                    AR_MCI_INTERRUPT_TX_HW_MSG_FAIL      | \
-                                    AR_MCI_INTERRUPT_TX_SW_MSG_FAIL      | \
-                                    AR_MCI_INTERRUPT_RX_MSG              | \
-                                    AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
-                                    AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
-
-#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
-                                       AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
-                                       AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
-                                       AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
-
-#define AR_MCI_REMOTE_CPU_INT                          0x1830
-#define AR_MCI_REMOTE_CPU_INT_EN                       0x1834
-#define AR_MCI_INTERRUPT_RX_MSG_RAW                    0x1838
-#define AR_MCI_INTERRUPT_RX_MSG_EN                     0x183c
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET           0x00000001
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S         0
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL            0x00000002
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S          1
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK              0x00000004
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S            2
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO              0x00000008
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S            3
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST               0x00000010
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S             4
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO              0x00000020
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S            5
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT                        0x00000040
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S              6
-#define AR_MCI_INTERRUPT_RX_MSG_GPM                    0x00000100
-#define AR_MCI_INTERRUPT_RX_MSG_GPM_S                  8
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO               0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S             9
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING           0x00000400
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S         10
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING             0x00000800
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S           11
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE               0x00001000
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S             12
-#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK         (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO  | \
-                                         AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
-                                         AR_MCI_INTERRUPT_RX_MSG_LNA_INFO   | \
-                                         AR_MCI_INTERRUPT_RX_MSG_CONT_NACK  | \
-                                         AR_MCI_INTERRUPT_RX_MSG_CONT_INFO  | \
-                                         AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
-
-#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM    | \
-                                        AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
-                                        AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING  | \
-                                        AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
-                                        AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
-
-#define AR_MCI_CPU_INT                                 0x1840
-
-#define AR_MCI_RX_STATUS                       0x1844
-#define AR_MCI_RX_LAST_SCHD_MSG_INDEX          0x00000F00
-#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S                8
-#define AR_MCI_RX_REMOTE_SLEEP                 0x00001000
-#define AR_MCI_RX_REMOTE_SLEEP_S               12
-#define AR_MCI_RX_MCI_CLK_REQ                  0x00002000
-#define AR_MCI_RX_MCI_CLK_REQ_S                        13
-
-#define AR_MCI_CONT_STATUS                     0x1848
-#define AR_MCI_CONT_RSSI_POWER                 0x000000FF
-#define AR_MCI_CONT_RSSI_POWER_S               0
-#define AR_MCI_CONT_PRIORITY                   0x0000FF00
-#define AR_MCI_CONT_PRIORITY_S                 8
-#define AR_MCI_CONT_TXRX                       0x00010000
-#define AR_MCI_CONT_TXRX_S                     16
-
-#define AR_MCI_BT_PRI0                         0x184c
-#define AR_MCI_BT_PRI1                         0x1850
-#define AR_MCI_BT_PRI2                         0x1854
-#define AR_MCI_BT_PRI3                         0x1858
-#define AR_MCI_BT_PRI                          0x185c
-#define AR_MCI_WL_FREQ0                                0x1860
-#define AR_MCI_WL_FREQ1                                0x1864
-#define AR_MCI_WL_FREQ2                                0x1868
-#define AR_MCI_GAIN                            0x186c
-#define AR_MCI_WBTIMER1                                0x1870
-#define AR_MCI_WBTIMER2                                0x1874
-#define AR_MCI_WBTIMER3                                0x1878
-#define AR_MCI_WBTIMER4                                0x187c
-#define AR_MCI_MAXGAIN                         0x1880
-#define AR_MCI_HW_SCHD_TBL_CTL                 0x1884
-#define AR_MCI_HW_SCHD_TBL_D0                  0x1888
-#define AR_MCI_HW_SCHD_TBL_D1                  0x188c
-#define AR_MCI_HW_SCHD_TBL_D2                  0x1890
-#define AR_MCI_HW_SCHD_TBL_D3                  0x1894
-#define AR_MCI_TX_PAYLOAD0                     0x1898
-#define AR_MCI_TX_PAYLOAD1                     0x189c
-#define AR_MCI_TX_PAYLOAD2                     0x18a0
-#define AR_MCI_TX_PAYLOAD3                     0x18a4
-#define AR_BTCOEX_WBTIMER                      0x18a8
-
-#define AR_BTCOEX_CTRL                                 0x18ac
-#define AR_BTCOEX_CTRL_AR9462_MODE                     0x00000001
-#define AR_BTCOEX_CTRL_AR9462_MODE_S                   0
-#define AR_BTCOEX_CTRL_WBTIMER_EN                      0x00000002
-#define AR_BTCOEX_CTRL_WBTIMER_EN_S                    1
-#define AR_BTCOEX_CTRL_MCI_MODE_EN                     0x00000004
-#define AR_BTCOEX_CTRL_MCI_MODE_EN_S                   2
-#define AR_BTCOEX_CTRL_LNA_SHARED                      0x00000008
-#define AR_BTCOEX_CTRL_LNA_SHARED_S                    3
-#define AR_BTCOEX_CTRL_PA_SHARED                       0x00000010
-#define AR_BTCOEX_CTRL_PA_SHARED_S                     4
-#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN          0x00000020
-#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S                5
-#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN       0x00000040
-#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S     6
-#define AR_BTCOEX_CTRL_NUM_ANTENNAS                    0x00000180
-#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S                  7
-#define AR_BTCOEX_CTRL_RX_CHAIN_MASK                   0x00000E00
-#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S                 9
-#define AR_BTCOEX_CTRL_AGGR_THRESH                     0x00007000
-#define AR_BTCOEX_CTRL_AGGR_THRESH_S                   12
-#define AR_BTCOEX_CTRL_1_CHAIN_BCN                     0x00080000
-#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S                   19
-#define AR_BTCOEX_CTRL_1_CHAIN_ACK                     0x00100000
-#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S                   20
-#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN                  0x1FE00000
-#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S                        28
-#define AR_BTCOEX_CTRL_REDUCE_TXPWR                    0x20000000
-#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S                  29
-#define AR_BTCOEX_CTRL_SPDT_ENABLE_10                  0x40000000
-#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S                        30
-#define AR_BTCOEX_CTRL_SPDT_POLARITY                   0x80000000
-#define AR_BTCOEX_CTRL_SPDT_POLARITY_S                 31
-
-#define AR_BTCOEX_MAX_TXPWR(_x)                                (0x18c0 + ((_x) << 2))
-#define AR_BTCOEX_WL_LNA                               0x1940
-#define AR_BTCOEX_RFGAIN_CTRL                          0x1944
-#define AR_BTCOEX_WL_LNA_TIMEOUT                       0x003FFFFF
-#define AR_BTCOEX_WL_LNA_TIMEOUT_S                     0
-
-#define AR_BTCOEX_CTRL2                                        0x1948
-#define AR_BTCOEX_CTRL2_TXPWR_THRESH                   0x0007F800
-#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S                 11
-#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK                  0x00380000
-#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S                        19
-#define AR_BTCOEX_CTRL2_RX_DEWEIGHT                    0x00400000
-#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S                  22
-#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL                   0x00800000
-#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S                 23
-#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL                 0x01000000
-#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S               24
-#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE                0x02000000
-#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S      25
-
-#define AR_BTCOEX_CTRL_SPDT_ENABLE          0x00000001
-#define AR_BTCOEX_CTRL_SPDT_ENABLE_S        0
-#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL     0x00000002
-#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S   1
-#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT   0x00000004
-#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
-#define AR_GLB_WLAN_UART_INTF_EN            0x00020000
-#define AR_GLB_WLAN_UART_INTF_EN_S          17
-#define AR_GLB_DS_JTAG_DISABLE              0x00040000
-#define AR_GLB_DS_JTAG_DISABLE_S            18
-
-#define AR_BTCOEX_RC                    0x194c
-#define AR_BTCOEX_MAX_RFGAIN(_x)        (0x1950 + ((_x) << 2))
-#define AR_BTCOEX_DBG                   0x1a50
-#define AR_MCI_LAST_HW_MSG_HDR          0x1a54
-#define AR_MCI_LAST_HW_MSG_BDY          0x1a58
-
-#define AR_MCI_SCHD_TABLE_2             0x1a5c
-#define AR_MCI_SCHD_TABLE_2_MEM_BASED   0x00000001
-#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
-#define AR_MCI_SCHD_TABLE_2_HW_BASED    0x00000002
-#define AR_MCI_SCHD_TABLE_2_HW_BASED_S  1
-
-#define AR_BTCOEX_CTRL3               0x1a60
-#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT      0x00000fff
-#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S    0
-
-#define AR_GLB_SWREG_DISCONT_MODE         0x2002c
-#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN   0x3
-
-#define AR_MCI_MISC                    0x1a74
-#define AR_MCI_MISC_HW_FIX_EN          0x00000001
-#define AR_MCI_MISC_HW_FIX_EN_S        0
-#define AR_MCI_DBG_CNT_CTRL            0x1a78
-#define AR_MCI_DBG_CNT_CTRL_ENABLE     0x00000001
-#define AR_MCI_DBG_CNT_CTRL_ENABLE_S   0
-
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/reg_aic.h b/drivers/net/wireless/ath/ath9k/reg_aic.h
new file mode 100644 (file)
index 0000000..955147a
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_AIC_H
+#define REG_AIC_H
+
+#define AR_SM_BASE                              0xa200
+#define AR_SM1_BASE                             0xb200
+#define AR_AGC_BASE                             0x9e00
+
+#define AR_PHY_AIC_CTRL_0_B0                    (AR_SM_BASE + 0x4b0)
+#define AR_PHY_AIC_CTRL_1_B0                    (AR_SM_BASE + 0x4b4)
+#define AR_PHY_AIC_CTRL_2_B0                    (AR_SM_BASE + 0x4b8)
+#define AR_PHY_AIC_CTRL_3_B0                    (AR_SM_BASE + 0x4bc)
+#define AR_PHY_AIC_CTRL_4_B0                    (AR_SM_BASE + 0x4c0)
+
+#define AR_PHY_AIC_STAT_0_B0                    (AR_SM_BASE + 0x4c4)
+#define AR_PHY_AIC_STAT_1_B0                    (AR_SM_BASE + 0x4c8)
+#define AR_PHY_AIC_STAT_2_B0                    (AR_SM_BASE + 0x4cc)
+
+#define AR_PHY_AIC_CTRL_0_B1                    (AR_SM1_BASE + 0x4b0)
+#define AR_PHY_AIC_CTRL_1_B1                    (AR_SM1_BASE + 0x4b4)
+#define AR_PHY_AIC_CTRL_4_B1                    (AR_SM1_BASE + 0x4c0)
+
+#define AR_PHY_AIC_STAT_0_B1                    (AR_SM1_BASE + 0x4c4)
+#define AR_PHY_AIC_STAT_1_B1                    (AR_SM1_BASE + 0x4c8)
+#define AR_PHY_AIC_STAT_2_B1                    (AR_SM1_BASE + 0x4cc)
+
+#define AR_PHY_AIC_SRAM_ADDR_B0                 (AR_SM_BASE + 0x5f0)
+#define AR_PHY_AIC_SRAM_DATA_B0                 (AR_SM_BASE + 0x5f4)
+
+#define AR_PHY_AIC_SRAM_ADDR_B1                 (AR_SM1_BASE + 0x5f0)
+#define AR_PHY_AIC_SRAM_DATA_B1                 (AR_SM1_BASE + 0x5f4)
+
+#define AR_PHY_BT_COEX_4                        (AR_AGC_BASE + 0x60)
+#define AR_PHY_BT_COEX_5                        (AR_AGC_BASE + 0x64)
+
+/* AIC fields */
+#define AR_PHY_AIC_MON_ENABLE                   0x80000000
+#define AR_PHY_AIC_MON_ENABLE_S                 31
+#define AR_PHY_AIC_CAL_MAX_HOP_COUNT            0x7F000000
+#define AR_PHY_AIC_CAL_MAX_HOP_COUNT_S          24
+#define AR_PHY_AIC_CAL_MIN_VALID_COUNT          0x00FE0000
+#define AR_PHY_AIC_CAL_MIN_VALID_COUNT_S        17
+#define AR_PHY_AIC_F_WLAN                       0x0001FC00
+#define AR_PHY_AIC_F_WLAN_S                     10
+#define AR_PHY_AIC_CAL_CH_VALID_RESET           0x00000200
+#define AR_PHY_AIC_CAL_CH_VALID_RESET_S         9
+#define AR_PHY_AIC_CAL_ENABLE                   0x00000100
+#define AR_PHY_AIC_CAL_ENABLE_S                 8
+#define AR_PHY_AIC_BTTX_PWR_THR                 0x000000FE
+#define AR_PHY_AIC_BTTX_PWR_THR_S               1
+#define AR_PHY_AIC_ENABLE                       0x00000001
+#define AR_PHY_AIC_ENABLE_S                     0
+#define AR_PHY_AIC_CAL_BT_REF_DELAY             0x00F00000
+#define AR_PHY_AIC_CAL_BT_REF_DELAY_S           20
+#define AR_PHY_AIC_BT_IDLE_CFG                  0x00080000
+#define AR_PHY_AIC_BT_IDLE_CFG_S                19
+#define AR_PHY_AIC_STDBY_COND                   0x00060000
+#define AR_PHY_AIC_STDBY_COND_S                 17
+#define AR_PHY_AIC_STDBY_ROT_ATT_DB             0x0001F800
+#define AR_PHY_AIC_STDBY_ROT_ATT_DB_S           11
+#define AR_PHY_AIC_STDBY_COM_ATT_DB             0x00000700
+#define AR_PHY_AIC_STDBY_COM_ATT_DB_S           8
+#define AR_PHY_AIC_RSSI_MAX                     0x000000F0
+#define AR_PHY_AIC_RSSI_MAX_S                   4
+#define AR_PHY_AIC_RSSI_MIN                     0x0000000F
+#define AR_PHY_AIC_RSSI_MIN_S                   0
+#define AR_PHY_AIC_RADIO_DELAY                  0x7F000000
+#define AR_PHY_AIC_RADIO_DELAY_S                24
+#define AR_PHY_AIC_CAL_STEP_SIZE_CORR           0x00F00000
+#define AR_PHY_AIC_CAL_STEP_SIZE_CORR_S         20
+#define AR_PHY_AIC_CAL_ROT_IDX_CORR             0x000F8000
+#define AR_PHY_AIC_CAL_ROT_IDX_CORR_S           15
+#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR        0x00006000
+#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR_S      13
+#define AR_PHY_AIC_ROT_IDX_COUNT_MAX            0x00001C00
+#define AR_PHY_AIC_ROT_IDX_COUNT_MAX_S          10
+#define AR_PHY_AIC_CAL_SYNTH_TOGGLE             0x00000200
+#define AR_PHY_AIC_CAL_SYNTH_TOGGLE_S           9
+#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX         0x00000100
+#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX_S       8
+#define AR_PHY_AIC_CAL_SYNTH_SETTLING           0x000000FF
+#define AR_PHY_AIC_CAL_SYNTH_SETTLING_S         0
+#define AR_PHY_AIC_MON_MAX_HOP_COUNT            0x07F00000
+#define AR_PHY_AIC_MON_MAX_HOP_COUNT_S          20
+#define AR_PHY_AIC_MON_MIN_STALE_COUNT          0x000FE000
+#define AR_PHY_AIC_MON_MIN_STALE_COUNT_S        13
+#define AR_PHY_AIC_MON_PWR_EST_LONG             0x00001000
+#define AR_PHY_AIC_MON_PWR_EST_LONG_S           12
+#define AR_PHY_AIC_MON_PD_TALLY_SCALING         0x00000C00
+#define AR_PHY_AIC_MON_PD_TALLY_SCALING_S       10
+#define AR_PHY_AIC_MON_PERF_THR                 0x000003E0
+#define AR_PHY_AIC_MON_PERF_THR_S               5
+#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING       0x00000018
+#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING_S     3
+#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR        0x00000006
+#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR_S      1
+#define AR_PHY_AIC_CAL_PWR_EST_LONG             0x00000001
+#define AR_PHY_AIC_CAL_PWR_EST_LONG_S           0
+#define AR_PHY_AIC_MON_DONE                     0x80000000
+#define AR_PHY_AIC_MON_DONE_S                   31
+#define AR_PHY_AIC_MON_ACTIVE                   0x40000000
+#define AR_PHY_AIC_MON_ACTIVE_S                 30
+#define AR_PHY_AIC_MEAS_COUNT                   0x3F000000
+#define AR_PHY_AIC_MEAS_COUNT_S                 24
+#define AR_PHY_AIC_CAL_ANT_ISO_EST              0x00FC0000
+#define AR_PHY_AIC_CAL_ANT_ISO_EST_S            18
+#define AR_PHY_AIC_CAL_HOP_COUNT                0x0003F800
+#define AR_PHY_AIC_CAL_HOP_COUNT_S              11
+#define AR_PHY_AIC_CAL_VALID_COUNT              0x000007F0
+#define AR_PHY_AIC_CAL_VALID_COUNT_S            4
+#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR          0x00000008
+#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR_S        3
+#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR        0x00000004
+#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR_S      2
+#define AR_PHY_AIC_CAL_DONE                     0x00000002
+#define AR_PHY_AIC_CAL_DONE_S                   1
+#define AR_PHY_AIC_CAL_ACTIVE                   0x00000001
+#define AR_PHY_AIC_CAL_ACTIVE_S                 0
+
+#define AR_PHY_AIC_MEAS_MAG_MIN                 0xFFC00000
+#define AR_PHY_AIC_MEAS_MAG_MIN_S               22
+#define AR_PHY_AIC_MON_STALE_COUNT              0x003F8000
+#define AR_PHY_AIC_MON_STALE_COUNT_S            15
+#define AR_PHY_AIC_MON_HOP_COUNT                0x00007F00
+#define AR_PHY_AIC_MON_HOP_COUNT_S              8
+#define AR_PHY_AIC_CAL_AIC_SM                   0x000000F8
+#define AR_PHY_AIC_CAL_AIC_SM_S                 3
+#define AR_PHY_AIC_SM                           0x00000007
+#define AR_PHY_AIC_SM_S                         0
+#define AR_PHY_AIC_SRAM_VALID                   0x00000001
+#define AR_PHY_AIC_SRAM_VALID_S                 0
+#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB         0x0000007E
+#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB_S       1
+#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN           0x00000080
+#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN_S         7
+#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB          0x00003F00
+#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB_S        8
+#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN            0x00004000
+#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN_S          14
+#define AR_PHY_AIC_SRAM_COM_ATT_6DB             0x00038000
+#define AR_PHY_AIC_SRAM_COM_ATT_6DB_S           15
+#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO       0x0000E000
+#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO_S     13
+#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO       0x00001E00
+#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO_S     9
+#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING     0x000001F8
+#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING_S   3
+#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF       0x00000006
+#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF_S     1
+#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED         0x00000001
+#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED_S       0
+
+#endif /* REG_AIC_H */
diff --git a/drivers/net/wireless/ath/ath9k/reg_mci.h b/drivers/net/wireless/ath/ath9k/reg_mci.h
new file mode 100644 (file)
index 0000000..6251310
--- /dev/null
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_MCI_H
+#define REG_MCI_H
+
+#define AR_MCI_COMMAND0                                 0x1800
+#define AR_MCI_COMMAND0_HEADER                          0xFF
+#define AR_MCI_COMMAND0_HEADER_S                        0
+#define AR_MCI_COMMAND0_LEN                             0x1f00
+#define AR_MCI_COMMAND0_LEN_S                           8
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP               0x2000
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S             13
+
+#define AR_MCI_COMMAND1                                 0x1804
+
+#define AR_MCI_COMMAND2                                 0x1808
+#define AR_MCI_COMMAND2_RESET_TX                        0x01
+#define AR_MCI_COMMAND2_RESET_TX_S                      0
+#define AR_MCI_COMMAND2_RESET_RX                        0x02
+#define AR_MCI_COMMAND2_RESET_RX_S                      1
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES             0x3FC
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S           2
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP                0x400
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S              10
+
+#define AR_MCI_RX_CTRL                                  0x180c
+
+#define AR_MCI_TX_CTRL                                  0x1810
+/*
+ * 0 = no division,
+ * 1 = divide by 2,
+ * 2 = divide by 4,
+ * 3 = divide by 8
+ */
+#define AR_MCI_TX_CTRL_CLK_DIV                          0x03
+#define AR_MCI_TX_CTRL_CLK_DIV_S                        0
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE               0x04
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S             2
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ                 0xFFFFF8
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S               3
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM                  0xF000000
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S                24
+
+#define AR_MCI_MSG_ATTRIBUTES_TABLE                     0x1814
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM            0xFFFF
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S          0
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR         0xFFFF0000
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S       16
+
+#define AR_MCI_SCHD_TABLE_0                             0x1818
+#define AR_MCI_SCHD_TABLE_1                             0x181c
+#define AR_MCI_GPM_0                                    0x1820
+#define AR_MCI_GPM_1                                    0x1824
+#define AR_MCI_GPM_WRITE_PTR                            0xFFFF0000
+#define AR_MCI_GPM_WRITE_PTR_S                          16
+#define AR_MCI_GPM_BUF_LEN                              0x0000FFFF
+#define AR_MCI_GPM_BUF_LEN_S                            0
+
+#define AR_MCI_INTERRUPT_RAW                            0x1828
+
+#define AR_MCI_INTERRUPT_EN                             0x182c
+#define AR_MCI_INTERRUPT_SW_MSG_DONE                    0x00000001
+#define AR_MCI_INTERRUPT_SW_MSG_DONE_S                  0
+#define AR_MCI_INTERRUPT_CPU_INT_MSG                    0x00000002
+#define AR_MCI_INTERRUPT_CPU_INT_MSG_S                  1
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL                  0x00000004
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S                2
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR                 0x00000008
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S               3
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL                 0x00000010
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S               4
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL                 0x00000020
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S               5
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL                 0x00000080
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S               7
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL                 0x00000100
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S               8
+#define AR_MCI_INTERRUPT_RX_MSG                         0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_S                       9
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE            0x00000400
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S          10
+#define AR_MCI_INTERRUPT_BT_PRI                         0x07fff800
+#define AR_MCI_INTERRUPT_BT_PRI_S                       11
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH                  0x08000000
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S                27
+#define AR_MCI_INTERRUPT_BT_FREQ                        0x10000000
+#define AR_MCI_INTERRUPT_BT_FREQ_S                      28
+#define AR_MCI_INTERRUPT_BT_STOMP                       0x20000000
+#define AR_MCI_INTERRUPT_BT_STOMP_S                     29
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ                     0x40000000
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S                   30
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT              0x80000000
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S            31
+
+#define AR_MCI_REMOTE_CPU_INT                           0x1830
+#define AR_MCI_REMOTE_CPU_INT_EN                        0x1834
+#define AR_MCI_INTERRUPT_RX_MSG_RAW                     0x1838
+#define AR_MCI_INTERRUPT_RX_MSG_EN                      0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET            0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S          0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL             0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S           1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK               0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S             2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO               0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S             3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST                0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S              4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO               0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S             5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT                 0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S               6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM                     0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S                   8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO                0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S              9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING            0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S          10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING              0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S            11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE                0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S              12
+
+#define AR_MCI_CPU_INT                                  0x1840
+
+#define AR_MCI_RX_STATUS                                0x1844
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX                   0x00000F00
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S                 8
+#define AR_MCI_RX_REMOTE_SLEEP                          0x00001000
+#define AR_MCI_RX_REMOTE_SLEEP_S                        12
+#define AR_MCI_RX_MCI_CLK_REQ                           0x00002000
+#define AR_MCI_RX_MCI_CLK_REQ_S                         13
+
+#define AR_MCI_CONT_STATUS                              0x1848
+#define AR_MCI_CONT_RSSI_POWER                          0x000000FF
+#define AR_MCI_CONT_RSSI_POWER_S                        0
+#define AR_MCI_CONT_PRIORITY                            0x0000FF00
+#define AR_MCI_CONT_PRIORITY_S                          8
+#define AR_MCI_CONT_TXRX                                0x00010000
+#define AR_MCI_CONT_TXRX_S                              16
+
+#define AR_MCI_BT_PRI0                                  0x184c
+#define AR_MCI_BT_PRI1                                  0x1850
+#define AR_MCI_BT_PRI2                                  0x1854
+#define AR_MCI_BT_PRI3                                  0x1858
+#define AR_MCI_BT_PRI                                   0x185c
+#define AR_MCI_WL_FREQ0                                 0x1860
+#define AR_MCI_WL_FREQ1                                 0x1864
+#define AR_MCI_WL_FREQ2                                 0x1868
+#define AR_MCI_GAIN                                     0x186c
+#define AR_MCI_WBTIMER1                                 0x1870
+#define AR_MCI_WBTIMER2                                 0x1874
+#define AR_MCI_WBTIMER3                                 0x1878
+#define AR_MCI_WBTIMER4                                 0x187c
+#define AR_MCI_MAXGAIN                                  0x1880
+#define AR_MCI_HW_SCHD_TBL_CTL                          0x1884
+#define AR_MCI_HW_SCHD_TBL_D0                           0x1888
+#define AR_MCI_HW_SCHD_TBL_D1                           0x188c
+#define AR_MCI_HW_SCHD_TBL_D2                           0x1890
+#define AR_MCI_HW_SCHD_TBL_D3                           0x1894
+#define AR_MCI_TX_PAYLOAD0                              0x1898
+#define AR_MCI_TX_PAYLOAD1                              0x189c
+#define AR_MCI_TX_PAYLOAD2                              0x18a0
+#define AR_MCI_TX_PAYLOAD3                              0x18a4
+#define AR_BTCOEX_WBTIMER                               0x18a8
+
+#define AR_BTCOEX_CTRL                                  0x18ac
+#define AR_BTCOEX_CTRL_AR9462_MODE                      0x00000001
+#define AR_BTCOEX_CTRL_AR9462_MODE_S                    0
+#define AR_BTCOEX_CTRL_WBTIMER_EN                       0x00000002
+#define AR_BTCOEX_CTRL_WBTIMER_EN_S                     1
+#define AR_BTCOEX_CTRL_MCI_MODE_EN                      0x00000004
+#define AR_BTCOEX_CTRL_MCI_MODE_EN_S                    2
+#define AR_BTCOEX_CTRL_LNA_SHARED                       0x00000008
+#define AR_BTCOEX_CTRL_LNA_SHARED_S                     3
+#define AR_BTCOEX_CTRL_PA_SHARED                        0x00000010
+#define AR_BTCOEX_CTRL_PA_SHARED_S                      4
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN           0x00000020
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S         5
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN        0x00000040
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S      6
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS                     0x00000180
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S                   7
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK                    0x00000E00
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S                  9
+#define AR_BTCOEX_CTRL_AGGR_THRESH                      0x00007000
+#define AR_BTCOEX_CTRL_AGGR_THRESH_S                    12
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN                      0x00080000
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S                    19
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK                      0x00100000
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S                    20
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN                   0x1FE00000
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S                 28
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR                     0x20000000
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S                   29
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10                   0x40000000
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S                 30
+#define AR_BTCOEX_CTRL_SPDT_POLARITY                    0x80000000
+#define AR_BTCOEX_CTRL_SPDT_POLARITY_S                  31
+
+#define AR_BTCOEX_WL_WEIGHTS0                           0x18b0
+#define AR_BTCOEX_WL_WEIGHTS1                           0x18b4
+#define AR_BTCOEX_WL_WEIGHTS2                           0x18b8
+#define AR_BTCOEX_WL_WEIGHTS3                           0x18bc
+
+#define AR_BTCOEX_MAX_TXPWR(_x)                         (0x18c0 + ((_x) << 2))
+#define AR_BTCOEX_WL_LNA                                0x1940
+#define AR_BTCOEX_RFGAIN_CTRL                           0x1944
+#define AR_BTCOEX_WL_LNA_TIMEOUT                        0x003FFFFF
+#define AR_BTCOEX_WL_LNA_TIMEOUT_S                      0
+
+#define AR_BTCOEX_CTRL2                                 0x1948
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH                    0x0007F800
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S                  11
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK                   0x00380000
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S                 19
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT                     0x00400000
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S                   22
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL                    0x00800000
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S                  23
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL                  0x01000000
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S                24
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE         0x02000000
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S       25
+
+#define AR_BTCOEX_CTRL_SPDT_ENABLE                      0x00000001
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_S                    0
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL                 0x00000002
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S               1
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT               0x00000004
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S             2
+#define AR_GLB_WLAN_UART_INTF_EN                        0x00020000
+#define AR_GLB_WLAN_UART_INTF_EN_S                      17
+#define AR_GLB_DS_JTAG_DISABLE                          0x00040000
+#define AR_GLB_DS_JTAG_DISABLE_S                        18
+
+#define AR_BTCOEX_RC                                    0x194c
+#define AR_BTCOEX_MAX_RFGAIN(_x)                        (0x1950 + ((_x) << 2))
+#define AR_BTCOEX_DBG                                   0x1a50
+#define AR_MCI_LAST_HW_MSG_HDR                          0x1a54
+#define AR_MCI_LAST_HW_MSG_BDY                          0x1a58
+
+#define AR_MCI_SCHD_TABLE_2                             0x1a5c
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED                   0x00000001
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S                 0
+#define AR_MCI_SCHD_TABLE_2_HW_BASED                    0x00000002
+#define AR_MCI_SCHD_TABLE_2_HW_BASED_S                  1
+
+#define AR_BTCOEX_CTRL3                                 0x1a60
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT               0x00000fff
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S             0
+
+#define AR_GLB_SWREG_DISCONT_MODE                       0x2002c
+#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN                 0x3
+
+#define AR_MCI_MISC                                     0x1a74
+#define AR_MCI_MISC_HW_FIX_EN                           0x00000001
+#define AR_MCI_MISC_HW_FIX_EN_S                         0
+
+#define AR_MCI_DBG_CNT_CTRL                             0x1a78
+#define AR_MCI_DBG_CNT_CTRL_ENABLE                      0x00000001
+#define AR_MCI_DBG_CNT_CTRL_ENABLE_S                    0
+#define AR_MCI_DBG_CNT_CTRL_BT_LINKID                   0x000007f8
+#define AR_MCI_DBG_CNT_CTRL_BT_LINKID_S                 3
+
+#define MCI_STAT_ALL_BT_LINKID                          0xffff
+
+#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE         | \
+                                 AR_MCI_INTERRUPT_RX_INVALID_HDR      | \
+                                 AR_MCI_INTERRUPT_RX_HW_MSG_FAIL      | \
+                                 AR_MCI_INTERRUPT_RX_SW_MSG_FAIL      | \
+                                 AR_MCI_INTERRUPT_TX_HW_MSG_FAIL      | \
+                                 AR_MCI_INTERRUPT_TX_SW_MSG_FAIL      | \
+                                 AR_MCI_INTERRUPT_RX_MSG              | \
+                                 AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
+                                 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
+
+#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+                                        AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+                                        AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+                                        AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
+
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
+                                        AR_MCI_INTERRUPT_RX_MSG_LNA_INFO    | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_NACK   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_INFO   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
+
+#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM           | \
+                                         AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET  | \
+                                         AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING    | \
+                                         AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING  | \
+                                         AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+
+#endif /* REG_MCI_H */
index 3abfca56ca5846acd89c6664879081e9f6f248de..453054078cc4785c770ea29a16c92cc5881d38bc 100644 (file)
@@ -72,7 +72,7 @@
 #define AR_WOW_MAC_INTR_EN              0x00040000
 #define AR_WOW_MAGIC_EN                 0x00010000
 #define AR_WOW_PATTERN_EN(x)            (x & 0xff)
-#define AR_WOW_PAT_FOUND_SHIFT  8
+#define AR_WOW_PAT_FOUND_SHIFT          8
 #define AR_WOW_PATTERN_FOUND(x)         (x & (0xff << AR_WOW_PAT_FOUND_SHIFT))
 #define AR_WOW_PATTERN_FOUND_MASK       ((0xff) << AR_WOW_PAT_FOUND_SHIFT)
 #define AR_WOW_MAGIC_PAT_FOUND          0x00020000
                                                AR_WOW_BEACON_FAIL |    \
                                                AR_WOW_KEEP_ALIVE_FAIL))
 
+#define AR_WOW2_PATTERN_EN(x)           ((x & 0xff) << 0)
+#define AR_WOW2_PATTERN_FOUND_SHIFT     8
+#define AR_WOW2_PATTERN_FOUND(x)        (x & (0xff << AR_WOW2_PATTERN_FOUND_SHIFT))
+#define AR_WOW2_PATTERN_FOUND_MASK      ((0xff) << AR_WOW2_PATTERN_FOUND_SHIFT)
+
+#define AR_WOW_STATUS2(x)               (x & AR_WOW2_PATTERN_FOUND_MASK)
+#define AR_WOW_CLEAR_EVENTS2(x)         (x & ~(AR_WOW2_PATTERN_EN(0xff)))
+
 #define AR_WOW_AIFS_CNT(x)              (x & 0xff)
 #define AR_WOW_SLOT_CNT(x)              ((x & 0xff) << 8)
 #define AR_WOW_KEEP_ALIVE_CNT(x)        ((x & 0xff) << 16)
index 65c8894c5f81040361fbdd5da9aa7823e6d38900..67a2f8c888292162ad8329dfb5c5131e9f347d69 100644 (file)
@@ -61,6 +61,8 @@ static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
                return "WMI_REG_READ_CMDID";
        case WMI_REG_WRITE_CMDID:
                return "WMI_REG_WRITE_CMDID";
+       case WMI_REG_RMW_CMDID:
+               return "WMI_REG_RMW_CMDID";
        case WMI_RC_STATE_CHANGE_CMDID:
                return "WMI_RC_STATE_CHANGE_CMDID";
        case WMI_RC_RATE_UPDATE_CMDID:
@@ -101,6 +103,7 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
        spin_lock_init(&wmi->event_lock);
        mutex_init(&wmi->op_mutex);
        mutex_init(&wmi->multi_write_mutex);
+       mutex_init(&wmi->multi_rmw_mutex);
        init_completion(&wmi->cmd_wait);
        INIT_LIST_HEAD(&wmi->pending_tx_events);
        tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
index 0db37f230018ee2692ef4fcbff7cf3f0b3135e8e..aa84a335289a59841f3623e9203fac52aa3d13cb 100644 (file)
@@ -112,6 +112,7 @@ enum wmi_cmd_id {
        WMI_TX_STATS_CMDID,
        WMI_RX_STATS_CMDID,
        WMI_BITRATE_MASK_CMDID,
+       WMI_REG_RMW_CMDID,
 };
 
 enum wmi_event_id {
@@ -125,12 +126,19 @@ enum wmi_event_id {
 };
 
 #define MAX_CMD_NUMBER 62
+#define MAX_RMW_CMD_NUMBER 15
 
 struct register_write {
        __be32 reg;
        __be32 val;
 };
 
+struct register_rmw {
+       __be32 reg;
+       __be32 set;
+       __be32 clr;
+} __packed;
+
 struct ath9k_htc_tx_event {
        int count;
        struct __wmi_event_txstatus txs;
@@ -156,10 +164,18 @@ struct wmi {
 
        spinlock_t wmi_lock;
 
+       /* multi write section */
        atomic_t mwrite_cnt;
        struct register_write multi_write[MAX_CMD_NUMBER];
        u32 multi_write_idx;
        struct mutex multi_write_mutex;
+
+       /* multi rmw section */
+       atomic_t m_rmw_cnt;
+       struct register_rmw multi_rmw[MAX_RMW_CMD_NUMBER];
+       u32 multi_rmw_idx;
+       struct mutex multi_rmw_mutex;
+
 };
 
 struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
index 1b8e75c4d2c2d6659b8901dd915b0209bfb928f7..0acd079ba96bd3d2f60602ebf5f889f36da9f908 100644 (file)
@@ -1103,14 +1103,28 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
        struct sk_buff *skb;
        struct ath_frame_info *fi;
        struct ieee80211_tx_info *info;
+       struct ieee80211_vif *vif;
        struct ath_hw *ah = sc->sc_ah;
 
        if (sc->tx99_state || !ah->tpc_enabled)
                return MAX_RATE_POWER;
 
        skb = bf->bf_mpdu;
-       fi = get_frame_info(skb);
        info = IEEE80211_SKB_CB(skb);
+       vif = info->control.vif;
+
+       if (!vif) {
+               max_power = sc->cur_chan->cur_txpower;
+               goto out;
+       }
+
+       if (vif->bss_conf.txpower_type != NL80211_TX_POWER_LIMITED) {
+               max_power = min_t(u8, sc->cur_chan->cur_txpower,
+                                 2 * vif->bss_conf.txpower);
+               goto out;
+       }
+
+       fi = get_frame_info(skb);
 
        if (!AR_SREV_9300_20_OR_LATER(ah)) {
                int txpower = fi->tx_power;
@@ -1147,25 +1161,25 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
                        txpower -= 2;
 
                txpower = max(txpower, 0);
-               max_power = min_t(u8, ah->tx_power[rateidx], txpower);
-
-               /* XXX: clamp minimum TX power at 1 for AR9160 since if
-                * max_power is set to 0, frames are transmitted at max
-                * TX power
-                */
-               if (!max_power && !AR_SREV_9280_20_OR_LATER(ah))
-                       max_power = 1;
+               max_power = min_t(u8, ah->tx_power[rateidx],
+                                 2 * vif->bss_conf.txpower);
+               max_power = min_t(u8, max_power, txpower);
        } else if (!bf->bf_state.bfs_paprd) {
                if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
-                       max_power = min(ah->tx_power_stbc[rateidx],
-                                       fi->tx_power);
+                       max_power = min_t(u8, ah->tx_power_stbc[rateidx],
+                                         2 * vif->bss_conf.txpower);
                else
-                       max_power = min(ah->tx_power[rateidx], fi->tx_power);
+                       max_power = min_t(u8, ah->tx_power[rateidx],
+                                         2 * vif->bss_conf.txpower);
+               max_power = min(max_power, fi->tx_power);
        } else {
                max_power = ah->paprd_training_power;
        }
-
-       return max_power;
+out:
+       /* XXX: clamp minimum TX power at 1 for AR9160 since if max_power
+        * is set to 0, frames are transmitted at max TX power
+        */
+       return (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) ? 1 : max_power;
 }
 
 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
index 3d57f877238921b979fea04c7f038f48bb0abcc9..c657ca26a71a7c8e2d75dd931ce22d87a372dd45 100644 (file)
@@ -289,7 +289,7 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
                                "count=%d, count_false=%d\n",
                                event->freq, pd->rs->type_id,
                                ps->pri, ps->count, ps->count_falses);
-                       channel_detector_reset(dpd, cd);
+                       pd->reset(pd, dpd->last_pulse_ts);
                        return true;
                }
        }
index 2d5ea21be47e592af98599e214697d6b022718f2..b97172667bc7b3c5c3ea3463c9b6573811a6d064 100644 (file)
@@ -14,6 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/etherdevice.h>
 #include "wil6210.h"
 #include "wmi.h"
 
@@ -217,7 +218,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
        if (cid < 0)
                return -ENOENT;
 
-       memcpy(mac, wil->sta[cid].addr, ETH_ALEN);
+       ether_addr_copy(mac, wil->sta[cid].addr);
        wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
 
        rc = wil_cid_fill_sinfo(wil, cid, sinfo);
@@ -387,15 +388,29 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        int ch;
        int rc = 0;
 
+       wil_print_connect_params(wil, sme);
+
        if (test_bit(wil_status_fwconnecting, wil->status) ||
            test_bit(wil_status_fwconnected, wil->status))
                return -EALREADY;
 
-       wil_print_connect_params(wil, sme);
+       if (sme->ie_len > WMI_MAX_IE_LEN) {
+               wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len);
+               return -ERANGE;
+       }
+
+       rsn_eid = sme->ie ?
+                       cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
+                       NULL;
+
+       if (sme->privacy && !rsn_eid) {
+               wil_err(wil, "Missing RSN IE for secure connection\n");
+               return -EINVAL;
+       }
 
        bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
                               sme->ssid, sme->ssid_len,
-                              WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+                              IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
        if (!bss) {
                wil_err(wil, "Unable to find BSS\n");
                return -ENOENT;
@@ -407,17 +422,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
                rc = -ENOENT;
                goto out;
        }
+       wil->privacy = sme->privacy;
 
-       rsn_eid = sme->ie ?
-                       cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
-                       NULL;
-       if (rsn_eid) {
-               if (sme->ie_len > WMI_MAX_IE_LEN) {
-                       rc = -ERANGE;
-                       wil_err(wil, "IE too large (%td bytes)\n",
-                               sme->ie_len);
-                       goto out;
-               }
+       if (wil->privacy) {
                /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */
                rc = wmi_del_cipher_key(wil, 0, bss->bssid);
                if (rc) {
@@ -450,7 +457,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
                        bss->capability);
                goto out;
        }
-       if (rsn_eid) {
+       if (wil->privacy) {
                conn.dot11_auth_mode = WMI_AUTH11_SHARED;
                conn.auth_mode = WMI_AUTH_WPA2_PSK;
                conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
@@ -472,8 +479,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        }
        conn.channel = ch - 1;
 
-       memcpy(conn.bssid, bss->bssid, ETH_ALEN);
-       memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
+       ether_addr_copy(conn.bssid, bss->bssid);
+       ether_addr_copy(conn.dst_mac, bss->bssid);
 
        set_bit(wil_status_fwconnecting, wil->status);
 
@@ -769,15 +776,24 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
                   bcon->assocresp_ies);
 
-       wil->secure_pcp = info->privacy;
+       wil->privacy = info->privacy;
 
        netif_carrier_on(ndev);
 
        rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
                           channel->hw_value);
        if (rc)
-               netif_carrier_off(ndev);
+               goto err_pcp_start;
+
+       rc = wil_bcast_init(wil);
+       if (rc)
+               goto err_bcast;
 
+       goto out; /* success */
+err_bcast:
+       wmi_pcp_stop(wil);
+err_pcp_start:
+       netif_carrier_off(ndev);
 out:
        mutex_unlock(&wil->mutex);
        return rc;
@@ -911,6 +927,21 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy,
        return 0;
 }
 
+static int wil_cfg80211_change_bss(struct wiphy *wiphy,
+                                  struct net_device *dev,
+                                  struct bss_parameters *params)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       if (params->ap_isolate >= 0) {
+               wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__,
+                            wil->ap_isolate, params->ap_isolate);
+               wil->ap_isolate = params->ap_isolate;
+       }
+
+       return 0;
+}
+
 static struct cfg80211_ops wil_cfg80211_ops = {
        .scan = wil_cfg80211_scan,
        .connect = wil_cfg80211_connect,
@@ -931,6 +962,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
        .stop_ap = wil_cfg80211_stop_ap,
        .del_station = wil_cfg80211_del_station,
        .probe_client = wil_cfg80211_probe_client,
+       .change_bss = wil_cfg80211_change_bss,
 };
 
 static void wil_wiphy_init(struct wiphy *wiphy)
index 45c3558ec8042e3db83203bd03a36a51f124d3eb..bbc22d88f78f27dadd73ea64134f5484ca9df497 100644 (file)
@@ -29,6 +29,7 @@
 static u32 mem_addr;
 static u32 dbg_txdesc_index;
 static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
+u32 vring_idle_trsh = 16; /* HW fetches up to 16 descriptors at once */
 
 enum dbg_off_type {
        doff_u32 = 0,
@@ -102,23 +103,36 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
                                   % vring->size;
                        int avail = vring->size - used - 1;
                        char name[10];
+                       char sidle[10];
                        /* performance monitoring */
                        cycles_t now = get_cycles();
                        uint64_t idle = txdata->idle * 100;
                        uint64_t total = now - txdata->begin;
 
-                       do_div(idle, total);
+                       if (total != 0) {
+                               do_div(idle, total);
+                               snprintf(sidle, sizeof(sidle), "%3d%%",
+                                        (int)idle);
+                       } else {
+                               snprintf(sidle, sizeof(sidle), "N/A");
+                       }
                        txdata->begin = now;
                        txdata->idle = 0ULL;
 
                        snprintf(name, sizeof(name), "tx_%2d", i);
 
-                       seq_printf(s,
-                                  "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %3d%%\n",
-                                  wil->sta[cid].addr, cid, tid,
-                                  txdata->agg_wsize, txdata->agg_timeout,
-                                  txdata->agg_amsdu ? "+" : "-",
-                                  used, avail, (int)idle);
+                       if (cid < WIL6210_MAX_CID)
+                               seq_printf(s,
+                                          "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
+                                          wil->sta[cid].addr, cid, tid,
+                                          txdata->agg_wsize,
+                                          txdata->agg_timeout,
+                                          txdata->agg_amsdu ? "+" : "-",
+                                          used, avail, sidle);
+                       else
+                               seq_printf(s,
+                                          "\nBroadcast [%3d|%3d] idle %s\n",
+                                          used, avail, sidle);
 
                        wil_print_vring(s, wil, name, vring, '_', 'H');
                }
@@ -549,7 +563,7 @@ static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
        dev_close(ndev);
        ndev->flags &= ~IFF_UP;
        rtnl_unlock();
-       wil_reset(wil);
+       wil_reset(wil, true);
 
        return len;
 }
@@ -618,7 +632,7 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
        struct wil6210_priv *wil = file->private_data;
        int rc;
        char *kbuf = kmalloc(len + 1, GFP_KERNEL);
-       char cmd[8];
+       char cmd[9];
        int p1, p2, p3;
 
        if (!kbuf)
@@ -1392,11 +1406,12 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
 
 /* fields in struct wil6210_priv */
 static const struct dbg_off dbg_wil_off[] = {
-       WIL_FIELD(secure_pcp,   S_IRUGO | S_IWUSR,      doff_u32),
+       WIL_FIELD(privacy,      S_IRUGO,                doff_u32),
        WIL_FIELD(status[0],    S_IRUGO | S_IWUSR,      doff_ulong),
        WIL_FIELD(fw_version,   S_IRUGO,                doff_u32),
        WIL_FIELD(hw_version,   S_IRUGO,                doff_x32),
        WIL_FIELD(recovery_count, S_IRUGO,              doff_u32),
+       WIL_FIELD(ap_isolate,   S_IRUGO,                doff_u32),
        {},
 };
 
@@ -1412,6 +1427,8 @@ static const struct dbg_off dbg_statics[] = {
        {"desc_index",  S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32},
        {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32},
        {"mem_addr",    S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
+       {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
+        doff_u32},
        {},
 };
 
index 4c44a82c34d79577e6e454bd42bbb9b8697d828e..0ea695ff98adeda1185382bfda7b58958b579275 100644 (file)
@@ -50,27 +50,19 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
-       if (test_bit(hw_capability_advanced_itr_moderation,
-                    wil->hw_capabilities)) {
-               tx_itr_en = ioread32(wil->csr +
-                                    HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
-               if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
-                       tx_itr_val =
-                               ioread32(wil->csr +
-                                        HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
-
-               rx_itr_en = ioread32(wil->csr +
-                                    HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
-               if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
-                       rx_itr_val =
-                               ioread32(wil->csr +
-                                        HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
-       } else {
-               rx_itr_en = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
-               if (rx_itr_en & BIT_DMA_ITR_CNT_CRL_EN)
-                       rx_itr_val = ioread32(wil->csr +
-                                             HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
-       }
+       tx_itr_en = ioread32(wil->csr +
+                            HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
+       if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
+               tx_itr_val =
+                       ioread32(wil->csr +
+                                HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
+
+       rx_itr_en = ioread32(wil->csr +
+                            HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
+       if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
+               rx_itr_val =
+                       ioread32(wil->csr +
+                                HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
 
        cp->tx_coalesce_usecs = tx_itr_val;
        cp->rx_coalesce_usecs = rx_itr_val;
index 93c5cc16c515c8df5bfc3e2be9dea61e3249ee5a..4428345e5a470360560ceb82772349cf8f754a7f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -20,6 +20,7 @@
 #include "fw.h"
 
 MODULE_FIRMWARE(WIL_FW_NAME);
+MODULE_FIRMWARE(WIL_FW2_NAME);
 
 /* target operations */
 /* register read */
index d4acf93a9a02b9fbfd9f5d063726bdd5e84d47c5..157f5ef384e0cc2804229044f369ac813c69a28d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -451,8 +451,6 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
                }
                return -EINVAL;
        }
-       /* Mark FW as loaded from host */
-       S(RGF_USER_USAGE_6, 1);
 
        return rc;
 }
index a6f923086f310d5795ef5beb4c3ebf61315001e9..28ffc18466c4b1e1d2887f1887fc3decf36e73e2 100644 (file)
@@ -166,9 +166,16 @@ void wil_unmask_irq(struct wil6210_priv *wil)
 /* target write operation */
 #define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
 
-static
-void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil)
+void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
 {
+       wil_dbg_irq(wil, "%s()\n", __func__);
+
+       /* disable interrupt moderation for monitor
+        * to get better timestamp precision
+        */
+       if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
+               return;
+
        /* Disable and clear tx counter before (re)configuration */
        W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
        W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
@@ -206,42 +213,8 @@ void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil)
                                      BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
 }
 
-static
-void wil_configure_interrupt_moderation_lgc(struct wil6210_priv *wil)
-{
-       /* disable, use usec resolution */
-       W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_CLR);
-
-       wil_info(wil, "set ITR_TRSH = %d usec\n", wil->rx_max_burst_duration);
-       W(RGF_DMA_ITR_CNT_TRSH, wil->rx_max_burst_duration);
-       /* start it */
-       W(RGF_DMA_ITR_CNT_CRL,
-         BIT_DMA_ITR_CNT_CRL_EN | BIT_DMA_ITR_CNT_CRL_EXT_TICK);
-}
-
 #undef W
 
-void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
-{
-       wil_dbg_irq(wil, "%s()\n", __func__);
-
-       /* disable interrupt moderation for monitor
-        * to get better timestamp precision
-        */
-       if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
-               return;
-
-       if (test_bit(hw_capability_advanced_itr_moderation,
-                    wil->hw_capabilities))
-               wil_configure_interrupt_moderation_new(wil);
-       else {
-               /* Advanced interrupt moderation is not available before
-                * Sparrow v2. Will use legacy interrupt moderation
-                */
-               wil_configure_interrupt_moderation_lgc(wil);
-       }
-}
-
 static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
@@ -253,7 +226,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
        trace_wil6210_irq_rx(isr);
        wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
 
-       if (!isr) {
+       if (unlikely(!isr)) {
                wil_err(wil, "spurious IRQ: RX\n");
                return IRQ_NONE;
        }
@@ -266,17 +239,18 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
         * action is always the same - should empty the accumulated
         * packets from the RX ring.
         */
-       if (isr & (BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH)) {
+       if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE |
+                         BIT_DMA_EP_RX_ICR_RX_HTRSH))) {
                wil_dbg_irq(wil, "RX done\n");
 
-               if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH)
+               if (unlikely(isr & BIT_DMA_EP_RX_ICR_RX_HTRSH))
                        wil_err_ratelimited(wil,
                                            "Received \"Rx buffer is in risk of overflow\" interrupt\n");
 
                isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
                         BIT_DMA_EP_RX_ICR_RX_HTRSH);
-               if (test_bit(wil_status_reset_done, wil->status)) {
-                       if (test_bit(wil_status_napi_en, wil->status)) {
+               if (likely(test_bit(wil_status_reset_done, wil->status))) {
+                       if (likely(test_bit(wil_status_napi_en, wil->status))) {
                                wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
                                need_unmask = false;
                                napi_schedule(&wil->napi_rx);
@@ -289,7 +263,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
                }
        }
 
-       if (isr)
+       if (unlikely(isr))
                wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
 
        /* Rx IRQ will be enabled when NAPI processing finished */
@@ -313,19 +287,19 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
        trace_wil6210_irq_tx(isr);
        wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
 
-       if (!isr) {
+       if (unlikely(!isr)) {
                wil_err(wil, "spurious IRQ: TX\n");
                return IRQ_NONE;
        }
 
        wil6210_mask_irq_tx(wil);
 
-       if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
+       if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
                wil_dbg_irq(wil, "TX done\n");
                isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
                /* clear also all VRING interrupts */
                isr &= ~(BIT(25) - 1UL);
-               if (test_bit(wil_status_reset_done, wil->status)) {
+               if (likely(test_bit(wil_status_reset_done, wil->status))) {
                        wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
                        need_unmask = false;
                        napi_schedule(&wil->napi_tx);
@@ -334,7 +308,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
                }
        }
 
-       if (isr)
+       if (unlikely(isr))
                wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
 
        /* Tx IRQ will be enabled when NAPI processing finished */
@@ -523,11 +497,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
        /**
         * pseudo_cause is Clear-On-Read, no need to ACK
         */
-       if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))
+       if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
                return IRQ_NONE;
 
        /* FIXME: IRQ mask debug */
-       if (wil6210_debug_irq_mask(wil, pseudo_cause))
+       if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
                return IRQ_NONE;
 
        trace_wil6210_irq_pseudo(pseudo_cause);
index b04e0afdcb216724b1329085f038c4da2d335016..c2a238426425462c7ff40f61c8e98fec9dadca6d 100644 (file)
@@ -29,10 +29,6 @@ bool no_fw_recovery;
 module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
 
-static bool no_fw_load = true;
-module_param(no_fw_load, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(no_fw_load, " do not download FW, use one in on-card flash.");
-
 /* if not set via modparam, will be set to default value of 1/8 of
  * rx ring size during init flow
  */
@@ -72,6 +68,7 @@ MODULE_PARM_DESC(mtu_max, " Max MTU value.");
 
 static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
 static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
+static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;
 
 static int ring_order_set(const char *val, const struct kernel_param *kp)
 {
@@ -220,6 +217,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
        switch (wdev->iftype) {
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
+               wil_bcast_fini(wil);
                netif_tx_stop_all_queues(ndev);
                netif_carrier_off(ndev);
 
@@ -364,6 +362,35 @@ static int wil_find_free_vring(struct wil6210_priv *wil)
        return -EINVAL;
 }
 
+int wil_bcast_init(struct wil6210_priv *wil)
+{
+       int ri = wil->bcast_vring, rc;
+
+       if ((ri >= 0) && wil->vring_tx[ri].va)
+               return 0;
+
+       ri = wil_find_free_vring(wil);
+       if (ri < 0)
+               return ri;
+
+       rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
+       if (rc == 0)
+               wil->bcast_vring = ri;
+
+       return rc;
+}
+
+void wil_bcast_fini(struct wil6210_priv *wil)
+{
+       int ri = wil->bcast_vring;
+
+       if (ri < 0)
+               return;
+
+       wil->bcast_vring = -1;
+       wil_vring_fini_tx(wil, ri);
+}
+
 static void wil_connect_worker(struct work_struct *work)
 {
        int rc;
@@ -411,6 +438,7 @@ int wil_priv_init(struct wil6210_priv *wil)
        init_completion(&wil->wmi_call);
 
        wil->pending_connect_cid = -1;
+       wil->bcast_vring = -1;
        setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
        setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
 
@@ -520,8 +548,6 @@ static int wil_target_reset(struct wil6210_priv *wil)
 {
        int delay = 0;
        u32 x;
-       bool is_reset_v2 = test_bit(hw_capability_reset_v2,
-                                   wil->hw_capabilities);
 
        wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
 
@@ -532,82 +558,67 @@ static int wil_target_reset(struct wil6210_priv *wil)
 
        wil_halt_cpu(wil);
 
+       /* clear all boot loader "ready" bits */
+       W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
        /* Clear Fw Download notification */
        C(RGF_USER_USAGE_6, BIT(0));
 
-       if (is_reset_v2) {
-               S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
-               /* XTAL stabilization should take about 3ms */
-               usleep_range(5000, 7000);
-               x = R(RGF_CAF_PLL_LOCK_STATUS);
-               if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
-                       wil_err(wil, "Xtal stabilization timeout\n"
-                               "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
-                       return -ETIME;
-               }
-               /* switch 10k to XTAL*/
-               C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
-               /* 40 MHz */
-               C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
-
-               W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
-               W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
+       S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+       /* XTAL stabilization should take about 3ms */
+       usleep_range(5000, 7000);
+       x = R(RGF_CAF_PLL_LOCK_STATUS);
+       if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
+               wil_err(wil, "Xtal stabilization timeout\n"
+                       "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
+               return -ETIME;
        }
+       /* switch 10k to XTAL*/
+       C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+       /* 40 MHz */
+       C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+
+       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
 
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3,
-         is_reset_v2 ? 0x000000f0 : 0x00000170);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
 
-       if (is_reset_v2) {
-               W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
-               W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
-       }
+       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
 
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
-       if (is_reset_v2) {
-               W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
-               /* reset A2 PCIE AHB */
-               W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
-       } else {
-               W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
-               W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
-               W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
-       }
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
 
-       /* TODO: check order here!!! Erez code is different */
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
-       /* wait until device ready. typical time is 200..250 msec */
+       /* wait until device ready. typical time is 20..80 msec */
        do {
                msleep(RST_DELAY);
-               x = R(RGF_USER_HW_MACHINE_STATE);
+               x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
                if (delay++ > RST_COUNT) {
-                       wil_err(wil, "Reset not completed, hw_state 0x%08x\n",
+                       wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
                                x);
                        return -ETIME;
                }
-       } while (x != HW_MACHINE_BOOT_DONE);
-
-       if (!is_reset_v2)
-               W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
+       } while (!(x & BIT_BL_READY));
 
        C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 
+       /* enable fix for HW bug related to the SA/DA swap in AP Rx */
+       S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
+         BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+
        wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
        return 0;
 }
 
-#undef R
-#undef W
-#undef S
-#undef C
-
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
 {
        le32_to_cpus(&r->base);
@@ -617,6 +628,32 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
        le32_to_cpus(&r->head);
 }
 
+static int wil_get_bl_info(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct RGF_BL bl;
+
+       wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
+       le32_to_cpus(&bl.ready);
+       le32_to_cpus(&bl.version);
+       le32_to_cpus(&bl.rf_type);
+       le32_to_cpus(&bl.baseband_type);
+
+       if (!is_valid_ether_addr(bl.mac_address)) {
+               wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
+               return -EINVAL;
+       }
+
+       ether_addr_copy(ndev->perm_addr, bl.mac_address);
+       if (!is_valid_ether_addr(ndev->dev_addr))
+               ether_addr_copy(ndev->dev_addr, bl.mac_address);
+       wil_info(wil,
+                "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
+                bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
+
+       return 0;
+}
+
 static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
 {
        ulong to = msecs_to_jiffies(1000);
@@ -637,7 +674,7 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
  * After calling this routine, you're expected to reload
  * the firmware.
  */
-int wil_reset(struct wil6210_priv *wil)
+int wil_reset(struct wil6210_priv *wil, bool load_fw)
 {
        int rc;
 
@@ -651,6 +688,7 @@ int wil_reset(struct wil6210_priv *wil)
 
        cancel_work_sync(&wil->disconnect_worker);
        wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
+       wil_bcast_fini(wil);
 
        /* prevent NAPI from being scheduled */
        bitmap_zero(wil->status, wil_status_last);
@@ -675,46 +713,62 @@ int wil_reset(struct wil6210_priv *wil)
        if (rc)
                return rc;
 
-       if (!no_fw_load) {
-               wil_info(wil, "Use firmware <%s>\n", WIL_FW_NAME);
+       rc = wil_get_bl_info(wil);
+       if (rc)
+               return rc;
+
+       if (load_fw) {
+               wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
+                        WIL_FW2_NAME);
+
                wil_halt_cpu(wil);
                /* Loading f/w from the file */
                rc = wil_request_firmware(wil, WIL_FW_NAME);
                if (rc)
                        return rc;
+               rc = wil_request_firmware(wil, WIL_FW2_NAME);
+               if (rc)
+                       return rc;
+
+               /* Mark FW as loaded from host */
+               S(RGF_USER_USAGE_6, 1);
 
-               /* clear any interrupts which on-card-firmware may have set */
+               /* clear any interrupts which on-card-firmware
+                * may have set
+                */
                wil6210_clear_irq(wil);
-               { /* CAF_ICR - clear and mask */
-                       u32 a = HOSTADDR(RGF_CAF_ICR) +
-                               offsetof(struct RGF_ICR, ICR);
-                       u32 m = HOSTADDR(RGF_CAF_ICR) +
-                               offsetof(struct RGF_ICR, IMV);
-                       u32 icr = ioread32(wil->csr + a);
-
-                       iowrite32(icr, wil->csr + a); /* W1C */
-                       iowrite32(~0, wil->csr + m);
-                       wmb(); /* wait for completion */
-               }
+               /* CAF_ICR - clear and mask */
+               /* it is W1C, clear by writing back same value */
+               S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+               W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+
                wil_release_cpu(wil);
-       } else {
-               wil_info(wil, "Use firmware from on-card flash\n");
        }
 
        /* init after reset */
        wil->pending_connect_cid = -1;
+       wil->ap_isolate = 0;
        reinit_completion(&wil->wmi_ready);
        reinit_completion(&wil->wmi_call);
 
-       wil_configure_interrupt_moderation(wil);
-       wil_unmask_irq(wil);
+       if (load_fw) {
+               wil_configure_interrupt_moderation(wil);
+               wil_unmask_irq(wil);
 
-       /* we just started MAC, wait for FW ready */
-       rc = wil_wait_for_fw_ready(wil);
+               /* we just started MAC, wait for FW ready */
+               rc = wil_wait_for_fw_ready(wil);
+               if (rc == 0) /* check FW is responsive */
+                       rc = wmi_echo(wil);
+       }
 
        return rc;
 }
 
+#undef R
+#undef W
+#undef S
+#undef C
+
 void wil_fw_error_recovery(struct wil6210_priv *wil)
 {
        wil_dbg_misc(wil, "starting fw error recovery\n");
@@ -730,7 +784,7 @@ int __wil_up(struct wil6210_priv *wil)
 
        WARN_ON(!mutex_is_locked(&wil->mutex));
 
-       rc = wil_reset(wil);
+       rc = wil_reset(wil, true);
        if (rc)
                return rc;
 
@@ -837,7 +891,7 @@ int __wil_down(struct wil6210_priv *wil)
        if (!iter)
                wil_err(wil, "timeout waiting for idle FW/HW\n");
 
-       wil_rx_fini(wil);
+       wil_reset(wil, false);
 
        return 0;
 }
index ace30c1b5c64133210e88597c183b71e9aaa5e25..f2f7ea29558e058f27d7f934ba36769db36306c2 100644 (file)
@@ -82,7 +82,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
        wil_rx_handle(wil, &quota);
        done = budget - quota;
 
-       if (done <= 1) { /* burst ends - only one packet processed */
+       if (done < budget) {
                napi_complete(napi);
                wil6210_unmask_irq_rx(wil);
                wil_dbg_txrx(wil, "NAPI RX complete\n");
@@ -110,7 +110,7 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
                tx_done += wil_tx_complete(wil, i);
        }
 
-       if (tx_done <= 1) { /* burst ends - only one packet processed */
+       if (tx_done < budget) {
                napi_complete(napi);
                wil6210_unmask_irq_tx(wil);
                wil_dbg_txrx(wil, "NAPI TX complete\n");
index 3dd26709ccb29a075ac6f21c6a4626fc22718af6..109986114abfa10d6bd6083fa52fb29de2912fea 100644 (file)
@@ -39,18 +39,6 @@ void wil_set_capabilities(struct wil6210_priv *wil)
        bitmap_zero(wil->hw_capabilities, hw_capability_last);
 
        switch (rev_id) {
-       case JTAG_DEV_ID_MARLON_B0:
-               wil->hw_name = "Marlon B0";
-               wil->hw_version = HW_VER_MARLON_B0;
-               break;
-       case JTAG_DEV_ID_SPARROW_A0:
-               wil->hw_name = "Sparrow A0";
-               wil->hw_version = HW_VER_SPARROW_A0;
-               break;
-       case JTAG_DEV_ID_SPARROW_A1:
-               wil->hw_name = "Sparrow A1";
-               wil->hw_version = HW_VER_SPARROW_A1;
-               break;
        case JTAG_DEV_ID_SPARROW_B0:
                wil->hw_name = "Sparrow B0";
                wil->hw_version = HW_VER_SPARROW_B0;
@@ -62,13 +50,6 @@ void wil_set_capabilities(struct wil6210_priv *wil)
        }
 
        wil_info(wil, "Board hardware is %s\n", wil->hw_name);
-
-       if (wil->hw_version >= HW_VER_SPARROW_A0)
-               set_bit(hw_capability_reset_v2, wil->hw_capabilities);
-
-       if (wil->hw_version >= HW_VER_SPARROW_B0)
-               set_bit(hw_capability_advanced_itr_moderation,
-                       wil->hw_capabilities);
 }
 
 void wil_disable_irq(struct wil6210_priv *wil)
@@ -150,7 +131,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
 
        /* need reset here to obtain MAC */
        mutex_lock(&wil->mutex);
-       rc = wil_reset(wil);
+       rc = wil_reset(wil, false);
        mutex_unlock(&wil->mutex);
        if (debug_fw)
                rc = 0;
@@ -265,8 +246,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        wil6210_debugfs_init(wil);
 
-       /* check FW is alive */
-       wmi_echo(wil);
 
        return 0;
 
@@ -305,7 +284,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
 }
 
 static const struct pci_device_id wil6210_pcie_ids[] = {
-       { PCI_DEVICE(0x1ae9, 0x0301) },
        { PCI_DEVICE(0x1ae9, 0x0310) },
        { PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
        { /* end: all zeroes */ },
index 8439f65db259728b35586769d82a428b51d3aeae..e8bd512d81a9b7bbd70b7d56e65e0133e39881b0 100644 (file)
@@ -33,6 +33,15 @@ module_param(rtap_include_phy_info, bool, S_IRUGO);
 MODULE_PARM_DESC(rtap_include_phy_info,
                 " Include PHY info in the radiotap header, default - no");
 
+bool rx_align_2;
+module_param(rx_align_2, bool, S_IRUGO);
+MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
+
+static inline uint wil_rx_snaplen(void)
+{
+       return rx_align_2 ? 6 : 0;
+}
+
 static inline int wil_vring_is_empty(struct vring *vring)
 {
        return vring->swhead == vring->swtail;
@@ -53,34 +62,38 @@ static inline int wil_vring_is_full(struct vring *vring)
        return wil_vring_next_tail(vring) == vring->swhead;
 }
 
-/*
- * Available space in Tx Vring
- */
-static inline int wil_vring_avail_tx(struct vring *vring)
+/* Used space in Tx Vring */
+static inline int wil_vring_used_tx(struct vring *vring)
 {
        u32 swhead = vring->swhead;
        u32 swtail = vring->swtail;
-       int used = (vring->size + swhead - swtail) % vring->size;
+       return (vring->size + swhead - swtail) % vring->size;
+}
 
-       return vring->size - used - 1;
+/* Available space in Tx Vring */
+static inline int wil_vring_avail_tx(struct vring *vring)
+{
+       return vring->size - wil_vring_used_tx(vring) - 1;
 }
 
-/**
- * wil_vring_wmark_low - low watermark for available descriptor space
- */
+/* wil_vring_wmark_low - low watermark for available descriptor space */
 static inline int wil_vring_wmark_low(struct vring *vring)
 {
        return vring->size/8;
 }
 
-/**
- * wil_vring_wmark_high - high watermark for available descriptor space
- */
+/* wil_vring_wmark_high - high watermark for available descriptor space */
 static inline int wil_vring_wmark_high(struct vring *vring)
 {
        return vring->size/4;
 }
 
+/* wil_val_in_range - check if value in [min,max) */
+static inline bool wil_val_in_range(int val, int min, int max)
+{
+       return val >= min && val < max;
+}
+
 static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 {
        struct device *dev = wil_to_dev(wil);
@@ -98,8 +111,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
                vring->va = NULL;
                return -ENOMEM;
        }
-       /*
-        * vring->va should be aligned on its size rounded up to power of 2
+       /* vring->va should be aligned on its size rounded up to power of 2
         * This is granted by the dma_alloc_coherent
         */
        vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -206,7 +218,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
                               u32 i, int headroom)
 {
        struct device *dev = wil_to_dev(wil);
-       unsigned int sz = mtu_max + ETH_HLEN;
+       unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
        struct vring_rx_desc dd, *d = &dd;
        volatile struct vring_rx_desc *_d = &vring->va[i].rx;
        dma_addr_t pa;
@@ -346,27 +358,6 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
        }
 }
 
-/*
- * Fast swap in place between 2 registers
- */
-static void wil_swap_u16(u16 *a, u16 *b)
-{
-       *a ^= *b;
-       *b ^= *a;
-       *a ^= *b;
-}
-
-static void wil_swap_ethaddr(void *data)
-{
-       struct ethhdr *eth = data;
-       u16 *s = (u16 *)eth->h_source;
-       u16 *d = (u16 *)eth->h_dest;
-
-       wil_swap_u16(s++, d++);
-       wil_swap_u16(s++, d++);
-       wil_swap_u16(s, d);
-}
-
 /**
  * reap 1 frame from @swhead
  *
@@ -383,40 +374,45 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        struct vring_rx_desc *d;
        struct sk_buff *skb;
        dma_addr_t pa;
-       unsigned int sz = mtu_max + ETH_HLEN;
+       unsigned int snaplen = wil_rx_snaplen();
+       unsigned int sz = mtu_max + ETH_HLEN + snaplen;
        u16 dmalen;
        u8 ftype;
-       u8 ds_bits;
        int cid;
+       int i = (int)vring->swhead;
        struct wil_net_stats *stats;
 
        BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
 
-       if (wil_vring_is_empty(vring))
+       if (unlikely(wil_vring_is_empty(vring)))
                return NULL;
 
-       _d = &vring->va[vring->swhead].rx;
-       if (!(_d->dma.status & RX_DMA_STATUS_DU)) {
+       _d = &vring->va[i].rx;
+       if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
                /* it is not error, we just reached end of Rx done area */
                return NULL;
        }
 
-       skb = vring->ctx[vring->swhead].skb;
+       skb = vring->ctx[i].skb;
+       vring->ctx[i].skb = NULL;
+       wil_vring_advance_head(vring, 1);
+       if (!skb) {
+               wil_err(wil, "No Rx skb at [%d]\n", i);
+               return NULL;
+       }
        d = wil_skb_rxdesc(skb);
        *d = *_d;
        pa = wil_desc_addr(&d->dma.addr);
-       vring->ctx[vring->swhead].skb = NULL;
-       wil_vring_advance_head(vring, 1);
 
        dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
        dmalen = le16_to_cpu(d->dma.length);
 
-       trace_wil6210_rx(vring->swhead, d);
-       wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
+       trace_wil6210_rx(i, d);
+       wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
        wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);
 
-       if (dmalen > sz) {
+       if (unlikely(dmalen > sz)) {
                wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
                kfree_skb(skb);
                return NULL;
@@ -445,14 +441,14 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
          * in Rx descriptor. If type is not data, it is an 802.11 frame as-is
         */
        ftype = wil_rxdesc_ftype(d) << 2;
-       if (ftype != IEEE80211_FTYPE_DATA) {
+       if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
                wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
                /* TODO: process it */
                kfree_skb(skb);
                return NULL;
        }
 
-       if (skb->len < ETH_HLEN) {
+       if (unlikely(skb->len < ETH_HLEN + snaplen)) {
                wil_err(wil, "Short frame, len = %d\n", skb->len);
                /* TODO: process it (i.e. BAR) */
                kfree_skb(skb);
@@ -463,9 +459,9 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
         * and in case of error drop the packet
         * higher stack layers will handle retransmission (if required)
         */
-       if (d->dma.status & RX_DMA_STATUS_L4I) {
+       if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
                /* L4 protocol identified, csum calculated */
-               if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
+               if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                /* If HW reports bad checksum, let IP stack re-check it
                 * For example, HW doesn't understand Microsoft IP stack that
@@ -474,13 +470,15 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
                 */
        }
 
-       ds_bits = wil_rxdesc_ds_bits(d);
-       if (ds_bits == 1) {
-               /*
-                * HW bug - in ToDS mode, i.e. Rx on AP side,
-                * addresses get swapped
+       if (snaplen) {
+               /* Packet layout
+                * +-------+-------+---------+------------+------+
+                * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
+                * +-------+-------+---------+------------+------+
+                * Need to remove SNAP, shifting SA and DA forward
                 */
-               wil_swap_ethaddr(skb->data);
+               memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
+               skb_pull(skb, snaplen);
        }
 
        return skb;
@@ -503,7 +501,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
                        (next_tail != v->swhead) && (count-- > 0);
                        v->swtail = next_tail) {
                rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
-               if (rc) {
+               if (unlikely(rc)) {
                        wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
                                rc, v->swtail);
                        break;
@@ -520,17 +518,71 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
  */
 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 {
-       gro_result_t rc;
+       gro_result_t rc = GRO_NORMAL;
        struct wil6210_priv *wil = ndev_to_wil(ndev);
+       struct wireless_dev *wdev = wil_to_wdev(wil);
        unsigned int len = skb->len;
        struct vring_rx_desc *d = wil_skb_rxdesc(skb);
-       int cid = wil_rxdesc_cid(d);
+       int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+       struct ethhdr *eth = (void *)skb->data;
+       /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
+        * is not suitable, need to look at data
+        */
+       int mcast = is_multicast_ether_addr(eth->h_dest);
        struct wil_net_stats *stats = &wil->sta[cid].stats;
+       struct sk_buff *xmit_skb = NULL;
+       static const char * const gro_res_str[] = {
+               [GRO_MERGED]            = "GRO_MERGED",
+               [GRO_MERGED_FREE]       = "GRO_MERGED_FREE",
+               [GRO_HELD]              = "GRO_HELD",
+               [GRO_NORMAL]            = "GRO_NORMAL",
+               [GRO_DROP]              = "GRO_DROP",
+       };
 
        skb_orphan(skb);
 
-       rc = napi_gro_receive(&wil->napi_rx, skb);
+       if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
+               if (mcast) {
+                       /* send multicast frames both to higher layers in
+                        * local net stack and back to the wireless medium
+                        */
+                       xmit_skb = skb_copy(skb, GFP_ATOMIC);
+               } else {
+                       int xmit_cid = wil_find_cid(wil, eth->h_dest);
+
+                       if (xmit_cid >= 0) {
+                               /* The destination station is associated to
+                                * this AP (in this VLAN), so send the frame
+                                * directly to it and do not pass it to local
+                                * net stack.
+                                */
+                               xmit_skb = skb;
+                               skb = NULL;
+                       }
+               }
+       }
+       if (xmit_skb) {
+               /* Send to wireless media and increase priority by 256 to
+                * keep the received priority instead of reclassifying
+                * the frame (see cfg80211_classify8021d).
+                */
+               xmit_skb->dev = ndev;
+               xmit_skb->priority += 256;
+               xmit_skb->protocol = htons(ETH_P_802_3);
+               skb_reset_network_header(xmit_skb);
+               skb_reset_mac_header(xmit_skb);
+               wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
+               dev_queue_xmit(xmit_skb);
+       }
 
+       if (skb) { /* deliver to local stack */
+
+               skb->protocol = eth_type_trans(skb, ndev);
+               rc = napi_gro_receive(&wil->napi_rx, skb);
+               wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
+                            len, gro_res_str[rc]);
+       }
+       /* statistics. rc set to GRO_NORMAL for AP bridging */
        if (unlikely(rc == GRO_DROP)) {
                ndev->stats.rx_dropped++;
                stats->rx_dropped++;
@@ -540,17 +592,8 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
                stats->rx_packets++;
                ndev->stats.rx_bytes += len;
                stats->rx_bytes += len;
-       }
-       {
-               static const char * const gro_res_str[] = {
-                       [GRO_MERGED]            = "GRO_MERGED",
-                       [GRO_MERGED_FREE]       = "GRO_MERGED_FREE",
-                       [GRO_HELD]              = "GRO_HELD",
-                       [GRO_NORMAL]            = "GRO_NORMAL",
-                       [GRO_DROP]              = "GRO_DROP",
-               };
-               wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
-                            len, gro_res_str[rc]);
+               if (mcast)
+                       ndev->stats.multicast++;
        }
 }
 
@@ -565,7 +608,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
        struct vring *v = &wil->vring_rx;
        struct sk_buff *skb;
 
-       if (!v->va) {
+       if (unlikely(!v->va)) {
                wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
                return;
        }
@@ -581,7 +624,6 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
                        skb->protocol = htons(ETH_P_802_2);
                        wil_netif_rx_any(skb, ndev);
                } else {
-                       skb->protocol = eth_type_trans(skb, ndev);
                        wil_rx_reorder(wil, skb);
                }
        }
@@ -707,6 +749,72 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
        return rc;
 }
 
+int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
+{
+       int rc;
+       struct wmi_bcast_vring_cfg_cmd cmd = {
+               .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+               .vring_cfg = {
+                       .tx_sw_ring = {
+                               .max_mpdu_size =
+                                       cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+                               .ring_size = cpu_to_le16(size),
+                       },
+                       .ringid = id,
+                       .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+               },
+       };
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_vring_cfg_done_event cmd;
+       } __packed reply;
+       struct vring *vring = &wil->vring_tx[id];
+       struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+
+       wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+                    cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
+
+       if (vring->va) {
+               wil_err(wil, "Tx ring [%d] already allocated\n", id);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       memset(txdata, 0, sizeof(*txdata));
+       spin_lock_init(&txdata->lock);
+       vring->size = size;
+       rc = wil_vring_alloc(wil, vring);
+       if (rc)
+               goto out;
+
+       wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
+       wil->vring2cid_tid[id][1] = 0; /* TID */
+
+       cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+
+       rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
+                     WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+       if (rc)
+               goto out_free;
+
+       if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
+               wil_err(wil, "Tx config failed, status 0x%02x\n",
+                       reply.cmd.status);
+               rc = -EINVAL;
+               goto out_free;
+       }
+       vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+
+       txdata->enabled = 1;
+
+       return 0;
+ out_free:
+       wil_vring_free(wil, vring, 1);
+ out:
+
+       return rc;
+}
+
 void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
 {
        struct vring *vring = &wil->vring_tx[id];
@@ -730,7 +838,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
        memset(txdata, 0, sizeof(*txdata));
 }
 
-static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
+static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
                                       struct sk_buff *skb)
 {
        int i;
@@ -763,15 +871,6 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
        return NULL;
 }
 
-static void wil_set_da_for_vring(struct wil6210_priv *wil,
-                                struct sk_buff *skb, int vring_index)
-{
-       struct ethhdr *eth = (void *)skb->data;
-       int cid = wil->vring2cid_tid[vring_index][0];
-
-       memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
-}
-
 static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                        struct sk_buff *skb);
 
@@ -792,6 +891,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
                        continue;
 
                cid = wil->vring2cid_tid[i][0];
+               if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+                       continue;
+
                if (!wil->sta[cid].data_port_open &&
                    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
                        break;
@@ -806,17 +908,51 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
        return NULL;
 }
 
-/*
- * Find 1-st vring and return it; set dest address for this vring in skb
- * duplicate skb and send it to other active vrings
+/* Use one of 2 strategies:
+ *
+ * 1. New (real broadcast):
+ *    use dedicated broadcast vring
+ * 2. Old (pseudo-DMS):
+ *    Find 1-st vring and return it;
+ *    duplicate skb and send it to other active vrings;
+ *    in all cases override dest address to unicast peer's address
+ * Use old strategy when new is not supported yet:
+ *  - for PBSS
+ *  - for secure link
  */
-static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
-                                 struct sk_buff *skb)
+static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
+                                        struct sk_buff *skb)
+{
+       struct vring *v;
+       int i = wil->bcast_vring;
+
+       if (i < 0)
+               return NULL;
+       v = &wil->vring_tx[i];
+       if (!v->va)
+               return NULL;
+
+       return v;
+}
+
+static void wil_set_da_for_vring(struct wil6210_priv *wil,
+                                struct sk_buff *skb, int vring_index)
+{
+       struct ethhdr *eth = (void *)skb->data;
+       int cid = wil->vring2cid_tid[vring_index][0];
+
+       ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
+}
+
+static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
+                                        struct sk_buff *skb)
 {
        struct vring *v, *v2;
        struct sk_buff *skb2;
        int i;
        u8 cid;
+       struct ethhdr *eth = (void *)skb->data;
+       char *src = eth->h_source;
 
        /* find 1-st vring eligible for data */
        for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
@@ -825,9 +961,15 @@ static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
                        continue;
 
                cid = wil->vring2cid_tid[i][0];
+               if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+                       continue;
                if (!wil->sta[cid].data_port_open)
                        continue;
 
+               /* don't Tx back to source when re-routing Rx->Tx at the AP */
+               if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
+                       continue;
+
                goto found;
        }
 
@@ -845,9 +987,14 @@ found:
                if (!v2->va)
                        continue;
                cid = wil->vring2cid_tid[i][0];
+               if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+                       continue;
                if (!wil->sta[cid].data_port_open)
                        continue;
 
+               if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
+                       continue;
+
                skb2 = skb_copy(skb, GFP_ATOMIC);
                if (skb2) {
                        wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
@@ -861,6 +1008,20 @@ found:
        return v;
 }
 
+static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
+                                      struct sk_buff *skb)
+{
+       struct wireless_dev *wdev = wil->wdev;
+
+       if (wdev->iftype != NL80211_IFTYPE_AP)
+               return wil_find_tx_bcast_2(wil, skb);
+
+       if (wil->privacy)
+               return wil_find_tx_bcast_2(wil, skb);
+
+       return wil_find_tx_bcast_1(wil, skb);
+}
+
 static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
                           int vring_index)
 {
@@ -952,13 +1113,16 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
        uint i = swhead;
        dma_addr_t pa;
+       int used;
+       bool mcast = (vring_index == wil->bcast_vring);
+       uint len = skb_headlen(skb);
 
        wil_dbg_txrx(wil, "%s()\n", __func__);
 
        if (unlikely(!txdata->enabled))
                return -EINVAL;
 
-       if (avail < 1 + nr_frags) {
+       if (unlikely(avail < 1 + nr_frags)) {
                wil_err_ratelimited(wil,
                                    "Tx ring[%2d] full. No space for %d fragments\n",
                                    vring_index, 1 + nr_frags);
@@ -977,9 +1141,19 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                return -EINVAL;
        vring->ctx[i].mapped_as = wil_mapped_as_single;
        /* 1-st segment */
-       wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
+       wil_tx_desc_map(d, pa, len, vring_index);
+       if (unlikely(mcast)) {
+               d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
+               if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
+                       /* set MCS 1 */
+                       d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
+                       /* packet mode 2 */
+                       d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
+                                      (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
+               }
+       }
        /* Process TCP/UDP checksum offloading */
-       if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
+       if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
                wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
                        vring_index);
                goto dma_error;
@@ -1027,8 +1201,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
         */
        vring->ctx[i].skb = skb_get(skb);
 
-       if (wil_vring_is_empty(vring)) /* performance monitoring */
+       /* performance monitoring */
+       used = wil_vring_used_tx(vring);
+       if (wil_val_in_range(vring_idle_trsh,
+                            used, used + nr_frags + 1)) {
                txdata->idle += get_cycles() - txdata->last_idle;
+               wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
+                            vring_index, used, used + nr_frags + 1);
+       }
 
        /* advance swhead */
        wil_vring_advance_head(vring, nr_frags + 1);
@@ -1077,23 +1257,24 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct wil6210_priv *wil = ndev_to_wil(ndev);
        struct ethhdr *eth = (void *)skb->data;
+       bool bcast = is_multicast_ether_addr(eth->h_dest);
        struct vring *vring;
        static bool pr_once_fw;
        int rc;
 
        wil_dbg_txrx(wil, "%s()\n", __func__);
-       if (!test_bit(wil_status_fwready, wil->status)) {
+       if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
                if (!pr_once_fw) {
                        wil_err(wil, "FW not ready\n");
                        pr_once_fw = true;
                }
                goto drop;
        }
-       if (!test_bit(wil_status_fwconnected, wil->status)) {
+       if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
                wil_err(wil, "FW not connected\n");
                goto drop;
        }
-       if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+       if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
                wil_err(wil, "Xmit in monitor mode not supported\n");
                goto drop;
        }
@@ -1104,12 +1285,10 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                /* in STA mode (ESS), all to same VRING */
                vring = wil_find_tx_vring_sta(wil, skb);
        } else { /* direct communication, find matching VRING */
-               if (is_unicast_ether_addr(eth->h_dest))
-                       vring = wil_find_tx_vring(wil, skb);
-               else
-                       vring = wil_tx_bcast(wil, skb);
+               vring = bcast ? wil_find_tx_bcast(wil, skb) :
+                               wil_find_tx_ucast(wil, skb);
        }
-       if (!vring) {
+       if (unlikely(!vring)) {
                wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
                goto drop;
        }
@@ -1117,7 +1296,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        rc = wil_tx_vring(wil, vring, skb);
 
        /* do we still have enough room in the vring? */
-       if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring)) {
+       if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
                netif_tx_stop_all_queues(wil_to_ndev(wil));
                wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
        }
@@ -1170,21 +1349,28 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
        struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
        int done = 0;
        int cid = wil->vring2cid_tid[ringid][0];
-       struct wil_net_stats *stats = &wil->sta[cid].stats;
+       struct wil_net_stats *stats = NULL;
        volatile struct vring_tx_desc *_d;
+       int used_before_complete;
+       int used_new;
 
-       if (!vring->va) {
+       if (unlikely(!vring->va)) {
                wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
                return 0;
        }
 
-       if (!txdata->enabled) {
+       if (unlikely(!txdata->enabled)) {
                wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
                return 0;
        }
 
        wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
 
+       used_before_complete = wil_vring_used_tx(vring);
+
+       if (cid < WIL6210_MAX_CID)
+               stats = &wil->sta[cid].stats;
+
        while (!wil_vring_is_empty(vring)) {
                int new_swtail;
                struct wil_ctx *ctx = &vring->ctx[vring->swtail];
@@ -1196,7 +1382,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                /* TODO: check we are not past head */
 
                _d = &vring->va[lf].tx;
-               if (!(_d->dma.status & TX_DMA_STATUS_DU))
+               if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
                        break;
 
                new_swtail = (lf + 1) % vring->size;
@@ -1224,14 +1410,17 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                        wil_txdesc_unmap(dev, d, ctx);
 
                        if (skb) {
-                               if (d->dma.error == 0) {
+                               if (likely(d->dma.error == 0)) {
                                        ndev->stats.tx_packets++;
-                                       stats->tx_packets++;
                                        ndev->stats.tx_bytes += skb->len;
-                                       stats->tx_bytes += skb->len;
+                                       if (stats) {
+                                               stats->tx_packets++;
+                                               stats->tx_bytes += skb->len;
+                                       }
                                } else {
                                        ndev->stats.tx_errors++;
-                                       stats->tx_errors++;
+                                       if (stats)
+                                               stats->tx_errors++;
                                }
                                wil_consume_skb(skb, d->dma.error == 0);
                        }
@@ -1246,8 +1435,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                }
        }
 
-       if (wil_vring_is_empty(vring)) { /* performance monitoring */
-               wil_dbg_txrx(wil, "Ring[%2d] empty\n", ringid);
+       /* performance monitoring */
+       used_new = wil_vring_used_tx(vring);
+       if (wil_val_in_range(vring_idle_trsh,
+                            used_new, used_before_complete)) {
+               wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
+                            ringid, used_before_complete, used_new);
                txdata->last_idle = get_cycles();
        }
 
index 94611568fc9ab3384128ebbe77d066ba16502985..4310972c9e1687b5b12dc4c101c9076932d907a8 100644 (file)
@@ -27,9 +27,12 @@ extern bool no_fw_recovery;
 extern unsigned int mtu_max;
 extern unsigned short rx_ring_overflow_thrsh;
 extern int agg_wsize;
+extern u32 vring_idle_trsh;
+extern bool rx_align_2;
 
 #define WIL_NAME "wil6210"
-#define WIL_FW_NAME "wil6210.fw"
+#define WIL_FW_NAME "wil6210.fw" /* code */
+#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */
 
 #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
 
@@ -47,6 +50,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
 #define WIL_TX_Q_LEN_DEFAULT           (4000)
 #define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
 #define WIL_TX_RING_SIZE_ORDER_DEFAULT (10)
+#define WIL_BCAST_RING_SIZE_ORDER_DEFAULT      (7)
+#define WIL_BCAST_MCS0_LIMIT           (1024) /* limit for MCS0 frame size */
 /* limit ring size in range [32..32k] */
 #define WIL_RING_SIZE_ORDER_MIN        (5)
 #define WIL_RING_SIZE_ORDER_MAX        (15)
@@ -120,6 +125,16 @@ struct RGF_ICR {
        u32 IMC; /* Mask Clear, write 1 to clear */
 } __packed;
 
+struct RGF_BL {
+       u32 ready;              /* 0x880A3C bit [0] */
+#define BIT_BL_READY   BIT(0)
+       u32 version;            /* 0x880A40 version of the BL struct */
+       u32 rf_type;            /* 0x880A44 ID of the connected RF */
+       u32 baseband_type;      /* 0x880A48 ID of the baseband */
+       u8  mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
+       u8 pad[2];
+} __packed;
+
 /* registers - FW addresses */
 #define RGF_USER_USAGE_1               (0x880004)
 #define RGF_USER_USAGE_6               (0x880018)
@@ -130,6 +145,7 @@ struct RGF_ICR {
 #define RGF_USER_MAC_CPU_0             (0x8801fc)
        #define BIT_USER_MAC_CPU_MAN_RST        BIT(1) /* mac_cpu_man_rst */
 #define RGF_USER_USER_SCRATCH_PAD      (0x8802bc)
+#define RGF_USER_BL                    (0x880A3C) /* Boot Loader */
 #define RGF_USER_FW_REV_ID             (0x880a8c) /* chip revision */
 #define RGF_USER_CLKS_CTL_0            (0x880abc)
        #define BIT_USER_CLKS_CAR_AHB_SW_SEL    BIT(1) /* ref clk/PLL */
@@ -169,6 +185,13 @@ struct RGF_ICR {
        #define BIT_DMA_ITR_CNT_CRL_CLR         BIT(3)
        #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH  BIT(4)
 
+/* Offload control (Sparrow B0+) */
+#define RGF_DMA_OFUL_NID_0             (0x881cd4)
+       #define BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN         BIT(0)
+       #define BIT_DMA_OFUL_NID_0_TX_EXT_TR_EN         BIT(1)
+       #define BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC        BIT(2)
+       #define BIT_DMA_OFUL_NID_0_TX_EXT_A3_SRC        BIT(3)
+
 /* New (sparrow v2+) interrupt moderation control */
 #define RGF_DMA_ITR_TX_DESQ_NO_MOD             (0x881d40)
 #define RGF_DMA_ITR_TX_CNT_TRSH                        (0x881d34)
@@ -229,16 +252,10 @@ struct RGF_ICR {
        #define BIT_CAF_OSC_DIG_XTAL_STABLE     BIT(0)
 
 #define RGF_USER_JTAG_DEV_ID   (0x880b34) /* device ID */
-       #define JTAG_DEV_ID_MARLON_B0   (0x0612072f)
-       #define JTAG_DEV_ID_SPARROW_A0  (0x0632072f)
-       #define JTAG_DEV_ID_SPARROW_A1  (0x1632072f)
        #define JTAG_DEV_ID_SPARROW_B0  (0x2632072f)
 
 enum {
        HW_VER_UNKNOWN,
-       HW_VER_MARLON_B0,  /* JTAG_DEV_ID_MARLON_B0  */
-       HW_VER_SPARROW_A0, /* JTAG_DEV_ID_SPARROW_A0 */
-       HW_VER_SPARROW_A1, /* JTAG_DEV_ID_SPARROW_A1 */
        HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
 };
 
@@ -482,8 +499,6 @@ enum {
 };
 
 enum {
-       hw_capability_reset_v2 = 0,
-       hw_capability_advanced_itr_moderation = 1,
        hw_capability_last
 };
 
@@ -528,8 +543,9 @@ struct wil6210_priv {
        wait_queue_head_t wq; /* for all wait_event() use */
        /* profile */
        u32 monitor_flags;
-       u32 secure_pcp; /* create secure PCP? */
+       u32 privacy; /* secure connection? */
        int sinfo_gen;
+       u32 ap_isolate; /* no intra-BSS communication */
        /* interrupt moderation */
        u32 tx_max_burst_duration;
        u32 tx_interframe_timeout;
@@ -581,6 +597,7 @@ struct wil6210_priv {
        struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
        u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
        struct wil_sta_info sta[WIL6210_MAX_CID];
+       int bcast_vring;
        /* scan */
        struct cfg80211_scan_request *scan_request;
 
@@ -658,7 +675,7 @@ int wil_if_add(struct wil6210_priv *wil);
 void wil_if_remove(struct wil6210_priv *wil);
 int wil_priv_init(struct wil6210_priv *wil);
 void wil_priv_deinit(struct wil6210_priv *wil);
-int wil_reset(struct wil6210_priv *wil);
+int wil_reset(struct wil6210_priv *wil, bool no_fw);
 void wil_fw_error_recovery(struct wil6210_priv *wil);
 void wil_set_recovery_state(struct wil6210_priv *wil, int state);
 int wil_up(struct wil6210_priv *wil);
@@ -743,6 +760,9 @@ void wil_rx_fini(struct wil6210_priv *wil);
 int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
                      int cid, int tid);
 void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
+int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size);
+int wil_bcast_init(struct wil6210_priv *wil);
+void wil_bcast_fini(struct wil6210_priv *wil);
 
 netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
 int wil_tx_complete(struct wil6210_priv *wil, int ringid);
index 0f3e4334c8e3e6858cfa17dcefab7c87dd45e45c..9fe2085be2c5b86d77e9346f003c7f38656ff3d3 100644 (file)
@@ -281,7 +281,6 @@ int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 /*=== Event handlers ===*/
 static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
 {
-       struct net_device *ndev = wil_to_ndev(wil);
        struct wireless_dev *wdev = wil->wdev;
        struct wmi_ready_event *evt = d;
 
@@ -290,11 +289,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
 
        wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
                 evt->mac, wil->n_mids);
-
-       if (!is_valid_ether_addr(ndev->dev_addr)) {
-               memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
-               memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
-       }
+       /* ignore MAC address, we already have it from the boot loader */
        snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
                 "%d", wil->fw_version);
 }
@@ -471,7 +466,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
 
        /* FIXME FW can transmit only ucast frames to peer */
        /* FIXME real ring_id instead of hard coded 0 */
-       memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN);
+       ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
        wil->sta[evt->cid].status = wil_sta_conn_pending;
 
        wil->pending_connect_cid = evt->cid;
@@ -529,8 +524,8 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
        }
 
        eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
-       memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
-       memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
+       ether_addr_copy(eth->h_dest, ndev->dev_addr);
+       ether_addr_copy(eth->h_source, evt->src_mac);
        eth->h_proto = cpu_to_be16(ETH_P_PAE);
        memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
        skb->protocol = eth_type_trans(skb, ndev);
@@ -856,7 +851,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
 {
        struct wmi_set_mac_address_cmd cmd;
 
-       memcpy(cmd.mac, addr, ETH_ALEN);
+       ether_addr_copy(cmd.mac, addr);
 
        wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
 
@@ -879,7 +874,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
                struct wmi_pcp_started_event evt;
        } __packed reply;
 
-       if (!wil->secure_pcp)
+       if (!wil->privacy)
                cmd.disable_sec = 1;
 
        if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) ||
@@ -1114,6 +1109,11 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
                 */
                cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
        }
+
+       if (rx_align_2)
+               cmd.l2_802_3_offload_ctrl |=
+                               L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK;
+
        /* typical time for secure PCP is 840ms */
        rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
                      WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
@@ -1162,7 +1162,8 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
        struct wmi_disconnect_sta_cmd cmd = {
                .disconnect_reason = cpu_to_le16(reason),
        };
-       memcpy(cmd.dst_mac, mac, ETH_ALEN);
+
+       ether_addr_copy(cmd.dst_mac, mac);
 
        wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
 
index 8a4af613e191856d9db24bb4525b52302358abf5..b2905531535039f6ae91835ee592a2960f140f83 100644 (file)
@@ -70,7 +70,6 @@ enum wmi_command_id {
        WMI_SET_UCODE_IDLE_CMDID        = 0x0813,
        WMI_SET_WORK_MODE_CMDID         = 0x0815,
        WMI_LO_LEAKAGE_CALIB_CMDID      = 0x0816,
-       WMI_MARLON_R_ACTIVATE_CMDID     = 0x0817,
        WMI_MARLON_R_READ_CMDID         = 0x0818,
        WMI_MARLON_R_WRITE_CMDID        = 0x0819,
        WMI_MARLON_R_TXRX_SEL_CMDID     = 0x081a,
@@ -80,6 +79,7 @@ enum wmi_command_id {
        WMI_RF_RX_TEST_CMDID            = 0x081e,
        WMI_CFG_RX_CHAIN_CMDID          = 0x0820,
        WMI_VRING_CFG_CMDID             = 0x0821,
+       WMI_BCAST_VRING_CFG_CMDID       = 0x0822,
        WMI_VRING_BA_EN_CMDID           = 0x0823,
        WMI_VRING_BA_DIS_CMDID          = 0x0824,
        WMI_RCP_ADDBA_RESP_CMDID        = 0x0825,
@@ -99,6 +99,7 @@ enum wmi_command_id {
        WMI_BF_TXSS_MGMT_CMDID          = 0x0837,
        WMI_BF_SM_MGMT_CMDID            = 0x0838,
        WMI_BF_RXSS_MGMT_CMDID          = 0x0839,
+       WMI_BF_TRIG_CMDID               = 0x083A,
        WMI_SET_SECTORS_CMDID           = 0x0849,
        WMI_MAINTAIN_PAUSE_CMDID        = 0x0850,
        WMI_MAINTAIN_RESUME_CMDID       = 0x0851,
@@ -595,6 +596,22 @@ struct wmi_vring_cfg_cmd {
        struct wmi_vring_cfg vring_cfg;
 } __packed;
 
+/*
+ * WMI_BCAST_VRING_CFG_CMDID
+ */
+struct wmi_bcast_vring_cfg {
+       struct wmi_sw_ring_cfg tx_sw_ring;
+       u8 ringid;                              /* 0-23 vrings */
+       u8 encap_trans_type;
+       u8 ds_cfg;                              /* 802.3 DS cfg */
+       u8 nwifi_ds_trans_type;
+} __packed;
+
+struct wmi_bcast_vring_cfg_cmd {
+       __le32 action;
+       struct wmi_bcast_vring_cfg vring_cfg;
+} __packed;
+
 /*
  * WMI_VRING_BA_EN_CMDID
  */
@@ -687,6 +704,9 @@ struct wmi_cfg_rx_chain_cmd {
        #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
        #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
        #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+       #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
+       #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
+       #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
        u8 l2_802_3_offload_ctrl;
 
        #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
@@ -841,7 +861,6 @@ enum wmi_event_id {
        WMI_IQ_RX_CALIB_DONE_EVENTID            = 0x1812,
        WMI_SET_WORK_MODE_DONE_EVENTID          = 0x1815,
        WMI_LO_LEAKAGE_CALIB_DONE_EVENTID       = 0x1816,
-       WMI_MARLON_R_ACTIVATE_DONE_EVENTID      = 0x1817,
        WMI_MARLON_R_READ_DONE_EVENTID          = 0x1818,
        WMI_MARLON_R_WRITE_DONE_EVENTID         = 0x1819,
        WMI_MARLON_R_TXRX_SEL_DONE_EVENTID      = 0x181a,
index 55db9f03eb2a3f25d702f88b7bde5b7d16b64441..6a1f03c271c1c04074ec76fcad031f5bce954382 100644 (file)
@@ -1004,7 +1004,7 @@ static void frag_rx_path(struct atmel_private *priv,
                        atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
                        if ((crc ^ 0xffffffff) != netcrc) {
                                priv->dev->stats.rx_crc_errors++;
-                               memset(priv->frag_source, 0xff, ETH_ALEN);
+                               eth_broadcast_addr(priv->frag_source);
                        }
                }
 
@@ -1022,7 +1022,7 @@ static void frag_rx_path(struct atmel_private *priv,
                        atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
                        if ((crc ^ 0xffffffff) != netcrc) {
                                priv->dev->stats.rx_crc_errors++;
-                               memset(priv->frag_source, 0xff, ETH_ALEN);
+                               eth_broadcast_addr(priv->frag_source);
                                more_frags = 1; /* don't send broken assembly */
                        }
                }
@@ -1031,7 +1031,7 @@ static void frag_rx_path(struct atmel_private *priv,
                priv->frag_no++;
 
                if (!more_frags) { /* last one */
-                       memset(priv->frag_source, 0xff, ETH_ALEN);
+                       eth_broadcast_addr(priv->frag_source);
                        if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
                                priv->dev->stats.rx_dropped++;
                        } else {
@@ -1127,7 +1127,7 @@ static void rx_done_irq(struct atmel_private *priv)
                        atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
 
                        /* we use the same buffer for frag reassembly and control packets */
-                       memset(priv->frag_source, 0xff, ETH_ALEN);
+                       eth_broadcast_addr(priv->frag_source);
 
                        if (priv->do_rx_crc) {
                                /* last 4 octets is crc */
@@ -1379,7 +1379,7 @@ static int atmel_close(struct net_device *dev)
                wrqu.data.length = 0;
                wrqu.data.flags = 0;
                wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
                wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
        }
 
@@ -1555,7 +1555,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
        priv->last_qual = jiffies;
        priv->last_beacon_timestamp = 0;
        memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
-       memset(priv->BSSID, 0, ETH_ALEN);
+       eth_zero_addr(priv->BSSID);
        priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
        priv->station_was_associated = 0;
 
@@ -2760,7 +2760,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
                u8 SSID_size;
        } cmd;
 
-       memset(cmd.BSSID, 0xff, ETH_ALEN);
+       eth_broadcast_addr(cmd.BSSID);
 
        if (priv->fast_scan) {
                cmd.SSID_size = priv->SSID_size;
@@ -4049,7 +4049,7 @@ static int reset_atmel_card(struct net_device *dev)
                wrqu.data.length = 0;
                wrqu.data.flags = 0;
                wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
                wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
        }
 
index 75345c1e8c3487468a23611a3072d525cdc8b8ca..b2f9521fe551a3e32bc85db602bd41a94e2b81ca 100644 (file)
@@ -4132,7 +4132,7 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
                if (conf->bssid)
                        memcpy(wl->bssid, conf->bssid, ETH_ALEN);
                else
-                       memset(wl->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(wl->bssid);
        }
 
        if (b43_status(dev) >= B43_STAT_INITIALIZED) {
@@ -4819,7 +4819,7 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
        switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
        case B43_BUS_BCMA:
-               bcma_core_pci_down(dev->dev->bdev->bus);
+               bcma_host_pci_down(dev->dev->bdev->bus);
                break;
 #endif
 #ifdef CONFIG_B43_SSB
@@ -4866,9 +4866,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
        switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
        case B43_BUS_BCMA:
-               bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0],
+               bcma_host_pci_irq_ctl(dev->dev->bdev->bus,
                                      dev->dev->bdev, true);
-               bcma_core_pci_up(dev->dev->bdev->bus);
+               bcma_host_pci_up(dev->dev->bdev->bus);
                break;
 #endif
 #ifdef CONFIG_B43_SSB
@@ -5051,7 +5051,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw,
        wl->operating = false;
 
        b43_adjust_opmode(dev);
-       memset(wl->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(wl->mac_addr);
        b43_upload_card_macaddress(dev);
 
        mutex_unlock(&wl->mutex);
@@ -5067,8 +5067,8 @@ static int b43_op_start(struct ieee80211_hw *hw)
        /* Kill all old instance specific information to make sure
         * the card won't use it in the short timeframe between start
         * and mac80211 reconfiguring it. */
-       memset(wl->bssid, 0, ETH_ALEN);
-       memset(wl->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(wl->bssid);
+       eth_zero_addr(wl->mac_addr);
        wl->filter_flags = 0;
        wl->radiotap_enabled = false;
        b43_qos_clear(wl);
index 4e58c0069830b698b0689203172a9a8f4d458e4b..c77b7f59505cc2eb95c13219e04b18862e42ae72 100644 (file)
@@ -2866,7 +2866,7 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
                if (conf->bssid)
                        memcpy(wl->bssid, conf->bssid, ETH_ALEN);
                else
-                       memset(wl->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(wl->bssid);
        }
 
        if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) {
@@ -3470,7 +3470,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
 
        spin_lock_irqsave(&wl->irq_lock, flags);
        b43legacy_adjust_opmode(dev);
-       memset(wl->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(wl->mac_addr);
        b43legacy_upload_card_macaddress(dev);
        spin_unlock_irqrestore(&wl->irq_lock, flags);
 
@@ -3487,8 +3487,8 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
        /* Kill all old instance specific information to make sure
         * the card won't use it in the short timeframe between start
         * and mac80211 reconfiguring it. */
-       memset(wl->bssid, 0, ETH_ALEN);
-       memset(wl->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(wl->bssid);
+       eth_zero_addr(wl->mac_addr);
        wl->filter_flags = 0;
        wl->beacon0_uploaded = false;
        wl->beacon1_uploaded = false;
index 7944224e3fc90140deb61ae061f4296a555ac526..9b508bd3b839256e7595ed90c6ad2eb1bb4709f7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mmc/host.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/pm_runtime.h>
 #include <linux/suspend.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #define BRCMF_DEFAULT_TXGLOM_SIZE      32  /* max tx frames in glom chain */
 #define BRCMF_DEFAULT_RXGLOM_SIZE      32  /* max rx frames in glom chain */
 
+struct brcmf_sdiod_freezer {
+       atomic_t freezing;
+       atomic_t thread_count;
+       u32 frozen_count;
+       wait_queue_head_t thread_freeze;
+       struct completion resumed;
+};
+
 static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
 module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
 MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
@@ -197,6 +206,30 @@ int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
        return 0;
 }
 
+void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
+                             enum brcmf_sdiod_state state)
+{
+       if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
+           state == sdiodev->state)
+               return;
+
+       brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
+       switch (sdiodev->state) {
+       case BRCMF_SDIOD_DATA:
+               /* any other state means bus interface is down */
+               brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
+               break;
+       case BRCMF_SDIOD_DOWN:
+               /* transition from DOWN to DATA means bus interface is up */
+               if (state == BRCMF_SDIOD_DATA)
+                       brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
+               break;
+       default:
+               break;
+       }
+       sdiodev->state = state;
+}
+
 static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
                                        uint regaddr, u8 byte)
 {
@@ -269,12 +302,6 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
        return ret;
 }
 
-static void brcmf_sdiod_nomedium_state(struct brcmf_sdio_dev *sdiodev)
-{
-       sdiodev->state = BRCMF_STATE_NOMEDIUM;
-       brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
-}
-
 static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                                   u8 regsz, void *data, bool write)
 {
@@ -282,7 +309,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
        s32 retry = 0;
        int ret;
 
-       if (sdiodev->state == BRCMF_STATE_NOMEDIUM)
+       if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
                return -ENOMEDIUM;
 
        /*
@@ -308,7 +335,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
 
        if (ret == -ENOMEDIUM)
-               brcmf_sdiod_nomedium_state(sdiodev);
+               brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
        else if (ret != 0) {
                /*
                 * SleepCSR register access can fail when
@@ -331,7 +358,7 @@ brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
        int err = 0, i;
        u8 addr[3];
 
-       if (sdiodev->state == BRCMF_STATE_NOMEDIUM)
+       if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
                return -ENOMEDIUM;
 
        addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
@@ -460,7 +487,7 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
                err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
                                  req_sz);
        if (err == -ENOMEDIUM)
-               brcmf_sdiod_nomedium_state(sdiodev);
+               brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
        return err;
 }
 
@@ -595,7 +622,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
 
                ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
                if (ret == -ENOMEDIUM) {
-                       brcmf_sdiod_nomedium_state(sdiodev);
+                       brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
                        break;
                } else if (ret != 0) {
                        brcmf_err("CMD53 sg block %s failed %d\n",
@@ -877,6 +904,87 @@ static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
        sdiodev->txglomsz = brcmf_sdiod_txglomsz;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
+{
+       sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
+       if (!sdiodev->freezer)
+               return -ENOMEM;
+       atomic_set(&sdiodev->freezer->thread_count, 0);
+       atomic_set(&sdiodev->freezer->freezing, 0);
+       init_waitqueue_head(&sdiodev->freezer->thread_freeze);
+       init_completion(&sdiodev->freezer->resumed);
+       return 0;
+}
+
+static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
+{
+       if (sdiodev->freezer) {
+               WARN_ON(atomic_read(&sdiodev->freezer->freezing));
+               kfree(sdiodev->freezer);
+       }
+}
+
+static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
+{
+       atomic_t *expect = &sdiodev->freezer->thread_count;
+       int res = 0;
+
+       sdiodev->freezer->frozen_count = 0;
+       reinit_completion(&sdiodev->freezer->resumed);
+       atomic_set(&sdiodev->freezer->freezing, 1);
+       brcmf_sdio_trigger_dpc(sdiodev->bus);
+       wait_event(sdiodev->freezer->thread_freeze,
+                  atomic_read(expect) == sdiodev->freezer->frozen_count);
+       sdio_claim_host(sdiodev->func[1]);
+       res = brcmf_sdio_sleep(sdiodev->bus, true);
+       sdio_release_host(sdiodev->func[1]);
+       return res;
+}
+
+static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
+{
+       sdio_claim_host(sdiodev->func[1]);
+       brcmf_sdio_sleep(sdiodev->bus, false);
+       sdio_release_host(sdiodev->func[1]);
+       atomic_set(&sdiodev->freezer->freezing, 0);
+       complete_all(&sdiodev->freezer->resumed);
+}
+
+bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
+{
+       return atomic_read(&sdiodev->freezer->freezing);
+}
+
+void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
+{
+       if (!brcmf_sdiod_freezing(sdiodev))
+               return;
+       sdiodev->freezer->frozen_count++;
+       wake_up(&sdiodev->freezer->thread_freeze);
+       wait_for_completion(&sdiodev->freezer->resumed);
+}
+
+void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
+{
+       atomic_inc(&sdiodev->freezer->thread_count);
+}
+
+void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
+{
+       atomic_dec(&sdiodev->freezer->thread_count);
+}
+#else
+static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
+{
+       return 0;
+}
+
+static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
+{
+}
+#endif /* CONFIG_PM_SLEEP */
+
 static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
 {
        if (sdiodev->bus) {
@@ -884,6 +992,8 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
                sdiodev->bus = NULL;
        }
 
+       brcmf_sdiod_freezer_detach(sdiodev);
+
        /* Disable Function 2 */
        sdio_claim_host(sdiodev->func[2]);
        sdio_disable_func(sdiodev->func[2]);
@@ -897,6 +1007,7 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
        sg_free_table(&sdiodev->sgtable);
        sdiodev->sbwad = 0;
 
+       pm_runtime_allow(sdiodev->func[1]->card->host->parent);
        return 0;
 }
 
@@ -955,13 +1066,17 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
         */
        brcmf_sdiod_sgtable_alloc(sdiodev);
 
+       ret = brcmf_sdiod_freezer_attach(sdiodev);
+       if (ret)
+               goto out;
+
        /* try to attach to the target device */
        sdiodev->bus = brcmf_sdio_probe(sdiodev);
        if (!sdiodev->bus) {
                ret = -ENODEV;
                goto out;
        }
-
+       pm_runtime_forbid(host->parent);
 out:
        if (ret)
                brcmf_sdiod_remove(sdiodev);
@@ -983,6 +1098,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
+       BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
+       BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
        { /* end: all zeroes */ }
 };
@@ -1050,9 +1167,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
                bus_if->wowl_supported = true;
 #endif
 
-       sdiodev->sleeping = false;
-       atomic_set(&sdiodev->suspend, false);
-       init_waitqueue_head(&sdiodev->idle_wait);
+       brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
 
        brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
        err = brcmf_sdiod_probe(sdiodev);
@@ -1083,7 +1198,7 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
        brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
        brcmf_dbg(SDIO, "Function: %d\n", func->num);
 
-       if (func->num != 1 && func->num != 2)
+       if (func->num != 1)
                return;
 
        bus_if = dev_get_drvdata(&func->dev);
@@ -1114,24 +1229,22 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
 #ifdef CONFIG_PM_SLEEP
 static int brcmf_ops_sdio_suspend(struct device *dev)
 {
+       struct sdio_func *func;
        struct brcmf_bus *bus_if;
        struct brcmf_sdio_dev *sdiodev;
        mmc_pm_flag_t sdio_flags;
 
-       brcmf_dbg(SDIO, "Enter\n");
+       func = container_of(dev, struct sdio_func, dev);
+       brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
+       if (func->num != SDIO_FUNC_1)
+               return 0;
+
 
        bus_if = dev_get_drvdata(dev);
        sdiodev = bus_if->bus_priv.sdio;
 
-       /* wait for watchdog to go idle */
-       if (wait_event_timeout(sdiodev->idle_wait, sdiodev->sleeping,
-                              msecs_to_jiffies(3 * BRCMF_WD_POLL_MS)) == 0) {
-               brcmf_err("bus still active\n");
-               return -EBUSY;
-       }
-       /* disable watchdog */
+       brcmf_sdiod_freezer_on(sdiodev);
        brcmf_sdio_wd_timer(sdiodev->bus, 0);
-       atomic_set(&sdiodev->suspend, true);
 
        if (sdiodev->wowl_enabled) {
                sdio_flags = MMC_PM_KEEP_POWER;
@@ -1149,12 +1262,13 @@ static int brcmf_ops_sdio_resume(struct device *dev)
 {
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+       struct sdio_func *func = container_of(dev, struct sdio_func, dev);
 
-       brcmf_dbg(SDIO, "Enter\n");
-       if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)
-               disable_irq_wake(sdiodev->pdata->oob_irq_nr);
-       brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
-       atomic_set(&sdiodev->suspend, false);
+       brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
+       if (func->num != SDIO_FUNC_2)
+               return 0;
+
+       brcmf_sdiod_freezer_off(sdiodev);
        return 0;
 }
 
index b59b8c6c42abeb1e17834b1dcd9af4a51b354b4e..8a15ebbce4a360ff054e08c197118ddf43f75381 100644 (file)
@@ -625,6 +625,7 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
 
 static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
                                                     const char *name,
+                                                    unsigned char name_assign_type,
                                                     enum nl80211_iftype type,
                                                     u32 *flags,
                                                     struct vif_params *params)
@@ -648,7 +649,7 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
        case NL80211_IFTYPE_P2P_CLIENT:
        case NL80211_IFTYPE_P2P_GO:
        case NL80211_IFTYPE_P2P_DEVICE:
-               wdev = brcmf_p2p_add_vif(wiphy, name, type, flags, params);
+               wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params);
                if (!IS_ERR(wdev))
                        brcmf_cfg80211_update_proto_addr_mode(wdev);
                return wdev;
@@ -700,7 +701,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
                /* Do a scan abort to stop the driver's scan engine */
                brcmf_dbg(SCAN, "ABORT scan in firmware\n");
                memset(&params_le, 0, sizeof(params_le));
-               memset(params_le.bssid, 0xFF, ETH_ALEN);
+               eth_broadcast_addr(params_le.bssid);
                params_le.bss_type = DOT11_BSSTYPE_ANY;
                params_le.scan_type = 0;
                params_le.channel_num = cpu_to_le32(1);
@@ -866,7 +867,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
        char *ptr;
        struct brcmf_ssid_le ssid_le;
 
-       memset(params_le->bssid, 0xFF, ETH_ALEN);
+       eth_broadcast_addr(params_le->bssid);
        params_le->bss_type = DOT11_BSSTYPE_ANY;
        params_le->scan_type = 0;
        params_le->channel_num = 0;
@@ -1050,10 +1051,6 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
        if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
                vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
 
-       /* Arm scan timeout timer */
-       mod_timer(&cfg->escan_timeout, jiffies +
-                       WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
-
        escan_req = false;
        if (request) {
                /* scan bss */
@@ -1112,12 +1109,14 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
                }
        }
 
+       /* Arm scan timeout timer */
+       mod_timer(&cfg->escan_timeout, jiffies +
+                       WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
+
        return 0;
 
 scan_out:
        clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
-       if (timer_pending(&cfg->escan_timeout))
-               del_timer_sync(&cfg->escan_timeout);
        cfg->scan_request = NULL;
        return err;
 }
@@ -1375,8 +1374,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
                                   BRCMF_ASSOC_PARAMS_FIXED_SIZE;
                memcpy(profile->bssid, params->bssid, ETH_ALEN);
        } else {
-               memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
-               memset(profile->bssid, 0, ETH_ALEN);
+               eth_broadcast_addr(join_params.params_le.bssid);
+               eth_zero_addr(profile->bssid);
        }
 
        /* Channel */
@@ -1850,7 +1849,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
        if (sme->bssid)
                memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN);
        else
-               memset(&ext_join_params->assoc_le.bssid, 0xFF, ETH_ALEN);
+               eth_broadcast_addr(ext_join_params->assoc_le.bssid);
 
        if (cfg->channel) {
                ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1);
@@ -1895,7 +1894,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
        if (sme->bssid)
                memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN);
        else
-               memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
+               eth_broadcast_addr(join_params.params_le.bssid);
 
        if (cfg->channel) {
                join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec);
@@ -2252,7 +2251,6 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
 
        if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
                /* we ignore this key index in this case */
-               brcmf_err("invalid key index (%d)\n", key_idx);
                return -EINVAL;
        }
 
@@ -4272,7 +4270,7 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
                return -EIO;
 
        memcpy(&scbval.ea, params->mac, ETH_ALEN);
-       scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
+       scbval.val = cpu_to_le32(params->reason_code);
        err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
                                     &scbval, sizeof(scbval));
        if (err)
index 04d2ca0d87d60b11cd0a2ca0baac23867e700ed6..ab2fac8b2760a89269ffcb549090087fe9a16f71 100644 (file)
 #define BCM4329_CORE_SOCRAM_BASE       0x18003000
 /* ARM Cortex M3 core, ID 0x82a */
 #define BCM4329_CORE_ARM_BASE          0x18002000
-#define BCM4329_RAMSIZE                        0x48000
-/* bcm43143 */
-#define BCM43143_RAMSIZE               0x70000
 
 #define CORE_SB(base, field) \
                (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
@@ -150,6 +147,78 @@ struct sbconfig {
        u32 sbidhigh;   /* identification */
 };
 
+/* bankidx and bankinfo reg defines corerev >= 8 */
+#define SOCRAM_BANKINFO_RETNTRAM_MASK  0x00010000
+#define SOCRAM_BANKINFO_SZMASK         0x0000007f
+#define SOCRAM_BANKIDX_ROM_MASK                0x00000100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT   8
+/* socram bankinfo memtype */
+#define SOCRAM_MEMTYPE_RAM             0
+#define SOCRAM_MEMTYPE_R0M             1
+#define SOCRAM_MEMTYPE_DEVRAM          2
+
+#define SOCRAM_BANKINFO_SZBASE         8192
+#define SRCI_LSS_MASK          0x00f00000
+#define SRCI_LSS_SHIFT         20
+#define        SRCI_SRNB_MASK          0xf0
+#define        SRCI_SRNB_SHIFT         4
+#define        SRCI_SRBSZ_MASK         0xf
+#define        SRCI_SRBSZ_SHIFT        0
+#define SR_BSZ_BASE            14
+
+struct sbsocramregs {
+       u32 coreinfo;
+       u32 bwalloc;
+       u32 extracoreinfo;
+       u32 biststat;
+       u32 bankidx;
+       u32 standbyctrl;
+
+       u32 errlogstatus;       /* rev 6 */
+       u32 errlogaddr; /* rev 6 */
+       /* used for patching rev 3 & 5 */
+       u32 cambankidx;
+       u32 cambankstandbyctrl;
+       u32 cambankpatchctrl;
+       u32 cambankpatchtblbaseaddr;
+       u32 cambankcmdreg;
+       u32 cambankdatareg;
+       u32 cambankmaskreg;
+       u32 PAD[1];
+       u32 bankinfo;   /* corev 8 */
+       u32 bankpda;
+       u32 PAD[14];
+       u32 extmemconfig;
+       u32 extmemparitycsr;
+       u32 extmemparityerrdata;
+       u32 extmemparityerrcnt;
+       u32 extmemwrctrlandsize;
+       u32 PAD[84];
+       u32 workaround;
+       u32 pwrctl;             /* corerev >= 2 */
+       u32 PAD[133];
+       u32 sr_control;     /* corerev >= 15 */
+       u32 sr_status;      /* corerev >= 15 */
+       u32 sr_address;     /* corerev >= 15 */
+       u32 sr_data;        /* corerev >= 15 */
+};
+
+#define SOCRAMREGOFFS(_f)      offsetof(struct sbsocramregs, _f)
+
+#define ARMCR4_CAP             (0x04)
+#define ARMCR4_BANKIDX         (0x40)
+#define ARMCR4_BANKINFO                (0x44)
+#define ARMCR4_BANKPDA         (0x4C)
+
+#define        ARMCR4_TCBBNB_MASK      0xf0
+#define        ARMCR4_TCBBNB_SHIFT     4
+#define        ARMCR4_TCBANB_MASK      0xf
+#define        ARMCR4_TCBANB_SHIFT     0
+
+#define        ARMCR4_BSZ_MASK         0x3f
+#define        ARMCR4_BSZ_MULT         8192
+
 struct brcmf_core_priv {
        struct brcmf_core pub;
        u32 wrapbase;
@@ -419,13 +488,13 @@ static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
        return &core->pub;
 }
 
-#ifdef DEBUG
 /* safety check for chipinfo */
 static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
 {
        struct brcmf_core_priv *core;
        bool need_socram = false;
        bool has_socram = false;
+       bool cpu_found = false;
        int idx = 1;
 
        list_for_each_entry(core, &ci->cores, list) {
@@ -435,22 +504,24 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
 
                switch (core->pub.id) {
                case BCMA_CORE_ARM_CM3:
+                       cpu_found = true;
                        need_socram = true;
                        break;
                case BCMA_CORE_INTERNAL_MEM:
                        has_socram = true;
                        break;
                case BCMA_CORE_ARM_CR4:
-                       if (ci->pub.rambase == 0) {
-                               brcmf_err("RAM base not provided with ARM CR4 core\n");
-                               return -ENOMEM;
-                       }
+                       cpu_found = true;
                        break;
                default:
                        break;
                }
        }
 
+       if (!cpu_found) {
+               brcmf_err("CPU core not detected\n");
+               return -ENXIO;
+       }
        /* check RAM core presence for ARM CM3 core */
        if (need_socram && !has_socram) {
                brcmf_err("RAM core not provided with ARM CM3 core\n");
@@ -458,56 +529,164 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
        }
        return 0;
 }
-#else  /* DEBUG */
-static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+
+static u32 brcmf_chip_core_read32(struct brcmf_core_priv *core, u16 reg)
 {
-       return 0;
+       return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg);
 }
-#endif
 
-static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+static void brcmf_chip_core_write32(struct brcmf_core_priv *core,
+                                   u16 reg, u32 val)
 {
-       switch (ci->pub.chip) {
-       case BRCM_CC_4329_CHIP_ID:
-               ci->pub.ramsize = BCM4329_RAMSIZE;
-               break;
-       case BRCM_CC_43143_CHIP_ID:
-               ci->pub.ramsize = BCM43143_RAMSIZE;
-               break;
-       case BRCM_CC_43241_CHIP_ID:
-               ci->pub.ramsize = 0x90000;
-               break;
-       case BRCM_CC_4330_CHIP_ID:
-               ci->pub.ramsize = 0x48000;
-               break;
+       core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val);
+}
+
+static bool brcmf_chip_socram_banksize(struct brcmf_core_priv *core, u8 idx,
+                                      u32 *banksize)
+{
+       u32 bankinfo;
+       u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+       bankidx |= idx;
+       brcmf_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx);
+       bankinfo = brcmf_chip_core_read32(core, SOCRAMREGOFFS(bankinfo));
+       *banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1;
+       *banksize *= SOCRAM_BANKINFO_SZBASE;
+       return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK);
+}
+
+static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
+                                     u32 *srsize)
+{
+       u32 coreinfo;
+       uint nb, banksize, lss;
+       bool retent;
+       int i;
+
+       *ramsize = 0;
+       *srsize = 0;
+
+       if (WARN_ON(sr->pub.rev < 4))
+               return;
+
+       if (!brcmf_chip_iscoreup(&sr->pub))
+               brcmf_chip_resetcore(&sr->pub, 0, 0, 0);
+
+       /* Get info for determining size */
+       coreinfo = brcmf_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo));
+       nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+
+       if ((sr->pub.rev <= 7) || (sr->pub.rev == 12)) {
+               banksize = (coreinfo & SRCI_SRBSZ_MASK);
+               lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+               if (lss != 0)
+                       nb--;
+               *ramsize = nb * (1 << (banksize + SR_BSZ_BASE));
+               if (lss != 0)
+                       *ramsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+       } else {
+               nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+               for (i = 0; i < nb; i++) {
+                       retent = brcmf_chip_socram_banksize(sr, i, &banksize);
+                       *ramsize += banksize;
+                       if (retent)
+                               *srsize += banksize;
+               }
+       }
+
+       /* hardcoded save&restore memory sizes */
+       switch (sr->chip->pub.chip) {
        case BRCM_CC_4334_CHIP_ID:
-       case BRCM_CC_43340_CHIP_ID:
-               ci->pub.ramsize = 0x80000;
+               if (sr->chip->pub.chiprev < 2)
+                       *srsize = (32 * 1024);
                break;
-       case BRCM_CC_4335_CHIP_ID:
-               ci->pub.ramsize = 0xc0000;
-               ci->pub.rambase = 0x180000;
+       case BRCM_CC_43430_CHIP_ID:
+               /* assume sr for now as we can not check
+                * firmware sr capability at this point.
+                */
+               *srsize = (64 * 1024);
                break;
-       case BRCM_CC_43362_CHIP_ID:
-               ci->pub.ramsize = 0x3c000;
+       default:
                break;
+       }
+}
+
+/** Return the TCM-RAM size of the ARMCR4 core. */
+static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
+{
+       u32 corecap;
+       u32 memsize = 0;
+       u32 nab;
+       u32 nbb;
+       u32 totb;
+       u32 bxinfo;
+       u32 idx;
+
+       corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP);
+
+       nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
+       nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
+       totb = nab + nbb;
+
+       for (idx = 0; idx < totb; idx++) {
+               brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx);
+               bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO);
+               memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+       }
+
+       return memsize;
+}
+
+static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
+{
+       switch (ci->pub.chip) {
+       case BRCM_CC_4345_CHIP_ID:
+               return 0x198000;
+       case BRCM_CC_4335_CHIP_ID:
        case BRCM_CC_4339_CHIP_ID:
        case BRCM_CC_4354_CHIP_ID:
        case BRCM_CC_4356_CHIP_ID:
        case BRCM_CC_43567_CHIP_ID:
        case BRCM_CC_43569_CHIP_ID:
        case BRCM_CC_43570_CHIP_ID:
-               ci->pub.ramsize = 0xc0000;
-               ci->pub.rambase = 0x180000;
-               break;
        case BRCM_CC_43602_CHIP_ID:
-               ci->pub.ramsize = 0xf0000;
-               ci->pub.rambase = 0x180000;
-               break;
+               return 0x180000;
        default:
                brcmf_err("unknown chip: %s\n", ci->pub.name);
                break;
        }
+       return 0;
+}
+
+static int brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+{
+       struct brcmf_core_priv *mem_core;
+       struct brcmf_core *mem;
+
+       mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4);
+       if (mem) {
+               mem_core = container_of(mem, struct brcmf_core_priv, pub);
+               ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core);
+               ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
+               if (!ci->pub.rambase) {
+                       brcmf_err("RAM base not provided with ARM CR4 core\n");
+                       return -EINVAL;
+               }
+       } else {
+               mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_INTERNAL_MEM);
+               mem_core = container_of(mem, struct brcmf_core_priv, pub);
+               brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
+                                         &ci->pub.srsize);
+       }
+       brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
+                 ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
+                 ci->pub.srsize, ci->pub.srsize);
+
+       if (!ci->pub.ramsize) {
+               brcmf_err("RAM size is undetermined\n");
+               return -ENOMEM;
+       }
+       return 0;
 }
 
 static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
@@ -660,6 +839,7 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
        struct brcmf_core *core;
        u32 regdata;
        u32 socitype;
+       int ret;
 
        /* Get CC core rev
         * Chipid is assume to be at offset 0 from SI_ENUM_BASE
@@ -712,9 +892,13 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
                return -ENODEV;
        }
 
-       brcmf_chip_get_raminfo(ci);
+       ret = brcmf_chip_cores_check(ci);
+       if (ret)
+               return ret;
 
-       return brcmf_chip_cores_check(ci);
+       /* assure chip is passive for core access */
+       brcmf_chip_set_passive(&ci->pub);
+       return brcmf_chip_get_raminfo(ci);
 }
 
 static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
@@ -778,12 +962,6 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
        if (chip->ops->setup)
                ret = chip->ops->setup(chip->ctx, pub);
 
-       /*
-        * Make sure any on-chip ARM is off (in case strapping is wrong),
-        * or downloaded code was already running.
-        */
-       brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
-       brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
        return ret;
 }
 
@@ -799,7 +977,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx,
                err = -EINVAL;
        if (WARN_ON(!ops->prepare))
                err = -EINVAL;
-       if (WARN_ON(!ops->exit_dl))
+       if (WARN_ON(!ops->activate))
                err = -EINVAL;
        if (err < 0)
                return ERR_PTR(-EINVAL);
@@ -897,9 +1075,10 @@ void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
 }
 
 static void
-brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
+brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
 {
        struct brcmf_core *core;
+       struct brcmf_core_priv *sr;
 
        brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
        core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
@@ -909,9 +1088,16 @@ brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
                             D11_BCMA_IOCTL_PHYCLOCKEN);
        core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
        brcmf_chip_resetcore(core, 0, 0, 0);
+
+       /* disable bank #3 remap for this device */
+       if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
+               sr = container_of(core, struct brcmf_core_priv, pub);
+               brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
+               brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
+       }
 }
 
-static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
+static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip)
 {
        struct brcmf_core *core;
 
@@ -921,7 +1107,7 @@ static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
                return false;
        }
 
-       chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
+       chip->ops->activate(chip->ctx, &chip->pub, 0);
 
        core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
        brcmf_chip_resetcore(core, 0, 0, 0);
@@ -930,7 +1116,7 @@ static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
 }
 
 static inline void
-brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
+brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip)
 {
        struct brcmf_core *core;
 
@@ -943,11 +1129,11 @@ brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
                             D11_BCMA_IOCTL_PHYCLOCKEN);
 }
 
-static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
+static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
 {
        struct brcmf_core *core;
 
-       chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
+       chip->ops->activate(chip->ctx, &chip->pub, rstvec);
 
        /* restore ARM */
        core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
@@ -956,7 +1142,7 @@ static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
        return true;
 }
 
-void brcmf_chip_enter_download(struct brcmf_chip *pub)
+void brcmf_chip_set_passive(struct brcmf_chip *pub)
 {
        struct brcmf_chip_priv *chip;
        struct brcmf_core *arm;
@@ -966,14 +1152,14 @@ void brcmf_chip_enter_download(struct brcmf_chip *pub)
        chip = container_of(pub, struct brcmf_chip_priv, pub);
        arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
        if (arm) {
-               brcmf_chip_cr4_enterdl(chip);
+               brcmf_chip_cr4_set_passive(chip);
                return;
        }
 
-       brcmf_chip_cm3_enterdl(chip);
+       brcmf_chip_cm3_set_passive(chip);
 }
 
-bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
+bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
 {
        struct brcmf_chip_priv *chip;
        struct brcmf_core *arm;
@@ -983,9 +1169,9 @@ bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
        chip = container_of(pub, struct brcmf_chip_priv, pub);
        arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
        if (arm)
-               return brcmf_chip_cr4_exitdl(chip, rstvec);
+               return brcmf_chip_cr4_set_active(chip, rstvec);
 
-       return brcmf_chip_cm3_exitdl(chip);
+       return brcmf_chip_cm3_set_active(chip);
 }
 
 bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
@@ -1016,6 +1202,10 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
                addr = CORE_CC_REG(base, chipcontrol_data);
                reg = chip->ops->read32(chip->ctx, addr);
                return (reg & pmu_cc3_mask) != 0;
+       case BRCM_CC_43430_CHIP_ID:
+               addr = CORE_CC_REG(base, sr_control1);
+               reg = chip->ops->read32(chip->ctx, addr);
+               return reg != 0;
        default:
                addr = CORE_CC_REG(base, pmucapabilities_ext);
                reg = chip->ops->read32(chip->ctx, addr);
index c32908da90c853e7a133bc0e24a03b35fe2045b3..60dcb38fc77a3f59e1c2efcaefd3d337470d3282 100644 (file)
@@ -30,7 +30,8 @@
  * @pmucaps: PMU capabilities.
  * @pmurev: PMU revision.
  * @rambase: RAM base address (only applicable for ARM CR4 chips).
- * @ramsize: amount of RAM on chip.
+ * @ramsize: amount of RAM on chip including retention.
+ * @srsize: amount of retention RAM on chip.
  * @name: string representation of the chip identifier.
  */
 struct brcmf_chip {
@@ -41,6 +42,7 @@ struct brcmf_chip {
        u32 pmurev;
        u32 rambase;
        u32 ramsize;
+       u32 srsize;
        char name[8];
 };
 
@@ -64,7 +66,7 @@ struct brcmf_core {
  * @write32: write 32-bit value over bus.
  * @prepare: prepare bus for core configuration.
  * @setup: bus-specific core setup.
- * @exit_dl: exit download state.
+ * @active: chip becomes active.
  *     The callback should use the provided @rstvec when non-zero.
  */
 struct brcmf_buscore_ops {
@@ -72,7 +74,7 @@ struct brcmf_buscore_ops {
        void (*write32)(void *ctx, u32 addr, u32 value);
        int (*prepare)(void *ctx);
        int (*setup)(void *ctx, struct brcmf_chip *chip);
-       void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
+       void (*activate)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
 };
 
 struct brcmf_chip *brcmf_chip_attach(void *ctx,
@@ -84,8 +86,8 @@ bool brcmf_chip_iscoreup(struct brcmf_core *core);
 void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
 void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
                          u32 postreset);
-void brcmf_chip_enter_download(struct brcmf_chip *ci);
-bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
+void brcmf_chip_set_passive(struct brcmf_chip *ci);
+bool brcmf_chip_set_active(struct brcmf_chip *ci, u32 rstvec);
 bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
 
 #endif /* BRCMF_AXIDMP_H */
index 2d6e2cc1b12ce98c6fd8a3039260d56c50070636..f8f47dcfa886278caf5d694bc9d7a0eadadb8ccd 100644 (file)
@@ -944,6 +944,34 @@ fail:
        return ret;
 }
 
+static int brcmf_revinfo_read(struct seq_file *s, void *data)
+{
+       struct brcmf_bus *bus_if = dev_get_drvdata(s->private);
+       struct brcmf_rev_info *ri = &bus_if->drvr->revinfo;
+       char drev[BRCMU_DOTREV_LEN];
+       char brev[BRCMU_BOARDREV_LEN];
+
+       seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid);
+       seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid);
+       seq_printf(s, "radiorev: %s\n", brcmu_dotrev_str(ri->radiorev, drev));
+       seq_printf(s, "chipnum: %u (%x)\n", ri->chipnum, ri->chipnum);
+       seq_printf(s, "chiprev: %u\n", ri->chiprev);
+       seq_printf(s, "chippkg: %u\n", ri->chippkg);
+       seq_printf(s, "corerev: %u\n", ri->corerev);
+       seq_printf(s, "boardid: 0x%04x\n", ri->boardid);
+       seq_printf(s, "boardvendor: 0x%04x\n", ri->boardvendor);
+       seq_printf(s, "boardrev: %s\n", brcmu_boardrev_str(ri->boardrev, brev));
+       seq_printf(s, "driverrev: %s\n", brcmu_dotrev_str(ri->driverrev, drev));
+       seq_printf(s, "ucoderev: %u\n", ri->ucoderev);
+       seq_printf(s, "bus: %u\n", ri->bus);
+       seq_printf(s, "phytype: %u\n", ri->phytype);
+       seq_printf(s, "phyrev: %u\n", ri->phyrev);
+       seq_printf(s, "anarev: %u\n", ri->anarev);
+       seq_printf(s, "nvramrev: %08x\n", ri->nvramrev);
+
+       return 0;
+}
+
 int brcmf_bus_start(struct device *dev)
 {
        int ret = -1;
@@ -974,6 +1002,8 @@ int brcmf_bus_start(struct device *dev)
        if (ret < 0)
                goto fail;
 
+       brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);
+
        /* assure we have chipid before feature attach */
        if (!bus_if->chip) {
                bus_if->chip = drvr->revinfo.chipnum;
index 910fbb561469e80b46147f8c18cbb14f3cd85e14..eb1325371d3a3aa9eba03e5ba83e65135f218f4e 100644 (file)
@@ -236,7 +236,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
        brcmf_flowring_block(flow, flowid, false);
        hash_idx = ring->hash_id;
        flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
-       memset(flow->hash[hash_idx].mac, 0, ETH_ALEN);
+       eth_zero_addr(flow->hash[hash_idx].mac);
        flow->rings[flowid] = NULL;
 
        skb = skb_dequeue(&ring->skblist);
index 6262612dec450b649c3ccc1eb1b3e12205335fd3..4ec9811f49c87744458ed16cdcec32422432dc3f 100644 (file)
@@ -481,10 +481,9 @@ static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
 
 static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
 {
-       if (waitqueue_active(&msgbuf->ioctl_resp_wait)) {
-               msgbuf->ctl_completed = true;
+       msgbuf->ctl_completed = true;
+       if (waitqueue_active(&msgbuf->ioctl_resp_wait))
                wake_up(&msgbuf->ioctl_resp_wait);
-       }
 }
 
 
index 77a51b8c1e120824a4eb6d0b336d3875d3c16fe5..3d513e407e3d5db610792a6bea0d1d843cd15a63 100644 (file)
 
 #ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
 
-#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM      20
-#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM       256
-#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM    20
+#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM      64
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM       512
+#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM    64
 #define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM         1024
-#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM         256
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM         512
 #define BRCMF_H2D_TXFLOWRING_MAX_ITEM                  512
 
 #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE      40
index effb48ebd86450c41d7a3a4d46b85df4f049b946..710fbe570eb24da1d316c94679b0ed390bc1a29c 100644 (file)
@@ -697,7 +697,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
        else
                sparams->scan_type = 1;
 
-       memset(&sparams->bssid, 0xFF, ETH_ALEN);
+       eth_broadcast_addr(sparams->bssid);
        if (ssid.SSID_len)
                memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len);
        sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
@@ -2246,11 +2246,13 @@ static void brcmf_p2p_delete_p2pdev(struct brcmf_p2p_info *p2p,
  *
  * @wiphy: wiphy device of new interface.
  * @name: name of the new interface.
+ * @name_assign_type: origin of the interface name
  * @type: nl80211 interface type.
  * @flags: not used.
  * @params: contains mac address for P2P device.
  */
 struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+                                      unsigned char name_assign_type,
                                       enum nl80211_iftype type, u32 *flags,
                                       struct vif_params *params)
 {
@@ -2310,6 +2312,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
        }
 
        strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+       ifp->ndev->name_assign_type = name_assign_type;
        err = brcmf_net_attach(ifp, true);
        if (err) {
                brcmf_err("Registering netdevice failed\n");
index 6821b26224bea5f8abed9a2596619f778a3dd01b..872f382d9e49b7531e05884d6f6b69e8fc760af0 100644 (file)
@@ -149,6 +149,7 @@ struct brcmf_p2p_info {
 s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg);
 void brcmf_p2p_detach(struct brcmf_p2p_info *p2p);
 struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+                                      unsigned char name_assign_type,
                                       enum nl80211_iftype type, u32 *flags,
                                       struct vif_params *params);
 int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
index 61c053a729be01b752101dab3db55cf1e6728e9b..1831ecd0813e955dfc88c3c6eb8783f36060465b 100644 (file)
@@ -47,8 +47,6 @@ enum brcmf_pcie_state {
 
 #define BRCMF_PCIE_43602_FW_NAME               "brcm/brcmfmac43602-pcie.bin"
 #define BRCMF_PCIE_43602_NVRAM_NAME            "brcm/brcmfmac43602-pcie.txt"
-#define BRCMF_PCIE_4354_FW_NAME                        "brcm/brcmfmac4354-pcie.bin"
-#define BRCMF_PCIE_4354_NVRAM_NAME             "brcm/brcmfmac4354-pcie.txt"
 #define BRCMF_PCIE_4356_FW_NAME                        "brcm/brcmfmac4356-pcie.bin"
 #define BRCMF_PCIE_4356_NVRAM_NAME             "brcm/brcmfmac4356-pcie.txt"
 #define BRCMF_PCIE_43570_FW_NAME               "brcm/brcmfmac43570-pcie.bin"
@@ -187,8 +185,8 @@ enum brcmf_pcie_state {
 
 MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
-MODULE_FIRMWARE(BRCMF_PCIE_4354_FW_NAME);
-MODULE_FIRMWARE(BRCMF_PCIE_4354_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
 
@@ -509,8 +507,6 @@ static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
 
 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
 {
-       brcmf_chip_enter_download(devinfo->ci);
-
        if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
                brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
                brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
@@ -536,7 +532,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
                brcmf_chip_resetcore(core, 0, 0, 0);
        }
 
-       return !brcmf_chip_exit_download(devinfo->ci, resetintr);
+       return !brcmf_chip_set_active(devinfo->ci, resetintr);
 }
 
 
@@ -653,10 +649,9 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
                        console->log_str[console->log_idx] = ch;
                        console->log_idx++;
                }
-
                if (ch == '\n') {
                        console->log_str[console->log_idx] = 0;
-                       brcmf_dbg(PCIE, "CONSOLE: %s\n", console->log_str);
+                       brcmf_dbg(PCIE, "CONSOLE: %s", console->log_str);
                        console->log_idx = 0;
                }
        }
@@ -1328,10 +1323,6 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
                fw_name = BRCMF_PCIE_43602_FW_NAME;
                nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
                break;
-       case BRCM_CC_4354_CHIP_ID:
-               fw_name = BRCMF_PCIE_4354_FW_NAME;
-               nvram_name = BRCMF_PCIE_4354_NVRAM_NAME;
-               break;
        case BRCM_CC_4356_CHIP_ID:
                fw_name = BRCMF_PCIE_4356_FW_NAME;
                nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
@@ -1566,8 +1557,8 @@ static int brcmf_pcie_buscoreprep(void *ctx)
 }
 
 
-static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
-                                     u32 rstvec)
+static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
+                                       u32 rstvec)
 {
        struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
 
@@ -1577,7 +1568,7 @@ static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
 
 static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
        .prepare = brcmf_pcie_buscoreprep,
-       .exit_dl = brcmf_pcie_buscore_exitdl,
+       .activate = brcmf_pcie_buscore_activate,
        .read32 = brcmf_pcie_buscore_read32,
        .write32 = brcmf_pcie_buscore_write32,
 };
@@ -1856,7 +1847,6 @@ cleanup:
        PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
 
 static struct pci_device_id brcmf_pcie_devid_table[] = {
-       BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
index faec35c899ec1fb50b67041dd1539d39319902eb..ab0c898330137e494a79c7418c151f9dec860f98 100644 (file)
@@ -432,8 +432,6 @@ struct brcmf_sdio {
        struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
        struct brcmf_chip *ci;  /* Chip info struct */
 
-       u32 ramsize;            /* Size of RAM in SOCRAM (bytes) */
-
        u32 hostintmask;        /* Copy of Host Interrupt Mask */
        atomic_t intstatus;     /* Intstatus bits (events) pending */
        atomic_t fcstate;       /* State of dongle flow-control */
@@ -485,10 +483,9 @@ struct brcmf_sdio {
 #endif                         /* DEBUG */
 
        uint clkstate;          /* State of sd and backplane clock(s) */
-       bool activity;          /* Activity flag for clock down */
        s32 idletime;           /* Control for activity timeout */
-       s32 idlecount;  /* Activity timeout counter */
-       s32 idleclock;  /* How to set bus driver when idle */
+       s32 idlecount;          /* Activity timeout counter */
+       s32 idleclock;          /* How to set bus driver when idle */
        bool rxflow_mode;       /* Rx flow control mode */
        bool rxflow;            /* Is rx flow control on */
        bool alp_only;          /* Don't use HT clock (ALP only) */
@@ -510,11 +507,13 @@ struct brcmf_sdio {
 
        struct workqueue_struct *brcmf_wq;
        struct work_struct datawork;
-       atomic_t dpc_tskcnt;
+       bool dpc_triggered;
+       bool dpc_running;
 
        bool txoff;             /* Transmit flow-controlled */
        struct brcmf_sdio_count sdcnt;
        bool sr_enabled; /* SaveRestore enabled */
+       bool sleeping;
 
        u8 tx_hdrlen;           /* sdio bus header length for tx packet */
        bool txglom;            /* host tx glomming enable flag */
@@ -616,6 +615,10 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
 #define BCM43362_NVRAM_NAME            "brcm/brcmfmac43362-sdio.txt"
 #define BCM4339_FIRMWARE_NAME          "brcm/brcmfmac4339-sdio.bin"
 #define BCM4339_NVRAM_NAME             "brcm/brcmfmac4339-sdio.txt"
+#define BCM43430_FIRMWARE_NAME         "brcm/brcmfmac43430-sdio.bin"
+#define BCM43430_NVRAM_NAME            "brcm/brcmfmac43430-sdio.txt"
+#define BCM43455_FIRMWARE_NAME         "brcm/brcmfmac43455-sdio.bin"
+#define BCM43455_NVRAM_NAME            "brcm/brcmfmac43455-sdio.txt"
 #define BCM4354_FIRMWARE_NAME          "brcm/brcmfmac4354-sdio.bin"
 #define BCM4354_NVRAM_NAME             "brcm/brcmfmac4354-sdio.txt"
 
@@ -639,6 +642,10 @@ MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
 MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43430_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43430_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43455_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43455_NVRAM_NAME);
 MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
 
@@ -668,6 +675,8 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
        { BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
        { BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
        { BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
+       { BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43430) },
+       { BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43455) },
        { BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
 };
 
@@ -958,13 +967,8 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
        brcmf_dbg(SDIO, "Enter\n");
 
        /* Early exit if we're already there */
-       if (bus->clkstate == target) {
-               if (target == CLK_AVAIL) {
-                       brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
-                       bus->activity = true;
-               }
+       if (bus->clkstate == target)
                return 0;
-       }
 
        switch (target) {
        case CLK_AVAIL:
@@ -973,8 +977,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
                        brcmf_sdio_sdclk(bus, true);
                /* Now request HT Avail on the backplane */
                brcmf_sdio_htclk(bus, true, pendok);
-               brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
-               bus->activity = true;
                break;
 
        case CLK_SDONLY:
@@ -986,7 +988,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
                else
                        brcmf_err("request for %d -> %d\n",
                                  bus->clkstate, target);
-               brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
                break;
 
        case CLK_NONE:
@@ -995,7 +996,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
                        brcmf_sdio_htclk(bus, false, false);
                /* Now remove the SD clock */
                brcmf_sdio_sdclk(bus, false);
-               brcmf_sdio_wd_timer(bus, 0);
                break;
        }
 #ifdef DEBUG
@@ -1013,26 +1013,16 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
 
        brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
                  (sleep ? "SLEEP" : "WAKE"),
-                 (bus->sdiodev->sleeping ? "SLEEP" : "WAKE"));
+                 (bus->sleeping ? "SLEEP" : "WAKE"));
 
        /* If SR is enabled control bus state with KSO */
        if (bus->sr_enabled) {
                /* Done if we're already in the requested state */
-               if (sleep == bus->sdiodev->sleeping)
+               if (sleep == bus->sleeping)
                        goto end;
 
                /* Going to sleep */
                if (sleep) {
-                       /* Don't sleep if something is pending */
-                       if (atomic_read(&bus->intstatus) ||
-                           atomic_read(&bus->ipend) > 0 ||
-                           (!atomic_read(&bus->fcstate) &&
-                           brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
-                           data_ok(bus))) {
-                                err = -EBUSY;
-                                goto done;
-                       }
-
                        clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
                                                   SBSDIO_FUNC1_CHIPCLKCSR,
                                                   &err);
@@ -1043,11 +1033,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
                                                  SBSDIO_ALP_AVAIL_REQ, &err);
                        }
                        err = brcmf_sdio_kso_control(bus, false);
-                       /* disable watchdog */
-                       if (!err)
-                               brcmf_sdio_wd_timer(bus, 0);
                } else {
-                       bus->idlecount = 0;
                        err = brcmf_sdio_kso_control(bus, true);
                }
                if (err) {
@@ -1064,10 +1050,9 @@ end:
                        brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
        } else {
                brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
+               brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
        }
-       bus->sdiodev->sleeping = sleep;
-       if (sleep)
-               wake_up(&bus->sdiodev->idle_wait);
+       bus->sleeping = sleep;
        brcmf_dbg(SDIO, "new state %s\n",
                  (sleep ? "SLEEP" : "WAKE"));
 done:
@@ -1085,44 +1070,47 @@ static inline bool brcmf_sdio_valid_shared_address(u32 addr)
 static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
                                 struct sdpcm_shared *sh)
 {
-       u32 addr;
+       u32 addr = 0;
        int rv;
        u32 shaddr = 0;
        struct sdpcm_shared_le sh_le;
        __le32 addr_le;
 
-       shaddr = bus->ci->rambase + bus->ramsize - 4;
+       sdio_claim_host(bus->sdiodev->func[1]);
+       brcmf_sdio_bus_sleep(bus, false, false);
 
        /*
         * Read last word in socram to determine
         * address of sdpcm_shared structure
         */
-       sdio_claim_host(bus->sdiodev->func[1]);
-       brcmf_sdio_bus_sleep(bus, false, false);
-       rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
-       sdio_release_host(bus->sdiodev->func[1]);
+       shaddr = bus->ci->rambase + bus->ci->ramsize - 4;
+       if (!bus->ci->rambase && brcmf_chip_sr_capable(bus->ci))
+               shaddr -= bus->ci->srsize;
+       rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr,
+                              (u8 *)&addr_le, 4);
        if (rv < 0)
-               return rv;
-
-       addr = le32_to_cpu(addr_le);
-
-       brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
+               goto fail;
 
        /*
         * Check if addr is valid.
         * NVRAM length at the end of memory should have been overwritten.
         */
+       addr = le32_to_cpu(addr_le);
        if (!brcmf_sdio_valid_shared_address(addr)) {
-                       brcmf_err("invalid sdpcm_shared address 0x%08X\n",
-                                 addr);
-                       return -EINVAL;
+               brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
+               rv = -EINVAL;
+               goto fail;
        }
 
+       brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
+
        /* Read hndrte_shared structure */
        rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
                               sizeof(struct sdpcm_shared_le));
        if (rv < 0)
-               return rv;
+               goto fail;
+
+       sdio_release_host(bus->sdiodev->func[1]);
 
        /* Endianness */
        sh->flags = le32_to_cpu(sh_le.flags);
@@ -1139,8 +1127,13 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
                          sh->flags & SDPCM_SHARED_VERSION_MASK);
                return -EPROTO;
        }
-
        return 0;
+
+fail:
+       brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
+                 rv, addr);
+       sdio_release_host(bus->sdiodev->func[1]);
+       return rv;
 }
 
 static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
@@ -1909,7 +1902,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
        bus->rxpending = true;
 
        for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
-            !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_STATE_DATA;
+            !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_SDIOD_DATA;
             rd->seq_num++, rxleft--) {
 
                /* Handle glomming separately */
@@ -2415,7 +2408,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
        }
 
        /* Deflow-control stack if needed */
-       if ((bus->sdiodev->state == BRCMF_STATE_DATA) &&
+       if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
            bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
                bus->txoff = false;
                brcmf_txflowblock(bus->sdiodev->dev, false);
@@ -2503,7 +2496,7 @@ static void brcmf_sdio_bus_stop(struct device *dev)
                bus->watchdog_tsk = NULL;
        }
 
-       if (sdiodev->state != BRCMF_STATE_NOMEDIUM) {
+       if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
                sdio_claim_host(sdiodev->func[1]);
 
                /* Enable clock for device interrupts */
@@ -2603,21 +2596,6 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
        return ret;
 }
 
-static int brcmf_sdio_pm_resume_wait(struct brcmf_sdio_dev *sdiodev)
-{
-#ifdef CONFIG_PM_SLEEP
-       int retry;
-
-       /* Wait for possible resume to complete */
-       retry = 0;
-       while ((atomic_read(&sdiodev->suspend)) && (retry++ != 50))
-               msleep(20);
-       if (atomic_read(&sdiodev->suspend))
-               return -EIO;
-#endif
-       return 0;
-}
-
 static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 {
        u32 newstatus = 0;
@@ -2628,9 +2606,6 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       if (brcmf_sdio_pm_resume_wait(bus->sdiodev))
-               return;
-
        sdio_claim_host(bus->sdiodev->func[1]);
 
        /* If waiting for HTAVAIL, check status */
@@ -2739,11 +2714,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
        if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
            data_ok(bus)) {
                sdio_claim_host(bus->sdiodev->func[1]);
-               err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
-                                             bus->ctrl_frame_len);
+               if (bus->ctrl_frame_stat) {
+                       err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
+                                                     bus->ctrl_frame_len);
+                       bus->ctrl_frame_err = err;
+                       wmb();
+                       bus->ctrl_frame_stat = false;
+               }
                sdio_release_host(bus->sdiodev->func[1]);
-               bus->ctrl_frame_err = err;
-               bus->ctrl_frame_stat = false;
                brcmf_sdio_wait_event_wakeup(bus);
        }
        /* Send queued frames (limit 1 if rx may still be pending) */
@@ -2755,15 +2733,25 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
                brcmf_sdio_sendfromq(bus, framecnt);
        }
 
-       if ((bus->sdiodev->state != BRCMF_STATE_DATA) || (err != 0)) {
+       if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
                brcmf_err("failed backplane access over SDIO, halting operation\n");
                atomic_set(&bus->intstatus, 0);
+               if (bus->ctrl_frame_stat) {
+                       sdio_claim_host(bus->sdiodev->func[1]);
+                       if (bus->ctrl_frame_stat) {
+                               bus->ctrl_frame_err = -ENODEV;
+                               wmb();
+                               bus->ctrl_frame_stat = false;
+                               brcmf_sdio_wait_event_wakeup(bus);
+                       }
+                       sdio_release_host(bus->sdiodev->func[1]);
+               }
        } else if (atomic_read(&bus->intstatus) ||
                   atomic_read(&bus->ipend) > 0 ||
                   (!atomic_read(&bus->fcstate) &&
                    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
                    data_ok(bus))) {
-               atomic_inc(&bus->dpc_tskcnt);
+               bus->dpc_triggered = true;
        }
 }
 
@@ -2862,11 +2850,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
                qcount[prec] = pktq_plen(&bus->txq, prec);
 #endif
 
-       if (atomic_read(&bus->dpc_tskcnt) == 0) {
-               atomic_inc(&bus->dpc_tskcnt);
-               queue_work(bus->brcmf_wq, &bus->datawork);
-       }
-
+       brcmf_sdio_trigger_dpc(bus);
        return ret;
 }
 
@@ -2963,23 +2947,27 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
        /* Send from dpc */
        bus->ctrl_frame_buf = msg;
        bus->ctrl_frame_len = msglen;
+       wmb();
        bus->ctrl_frame_stat = true;
-       if (atomic_read(&bus->dpc_tskcnt) == 0) {
-               atomic_inc(&bus->dpc_tskcnt);
-               queue_work(bus->brcmf_wq, &bus->datawork);
-       }
 
+       brcmf_sdio_trigger_dpc(bus);
        wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
                                         msecs_to_jiffies(CTL_DONE_TIMEOUT));
-
-       if (!bus->ctrl_frame_stat) {
+       ret = 0;
+       if (bus->ctrl_frame_stat) {
+               sdio_claim_host(bus->sdiodev->func[1]);
+               if (bus->ctrl_frame_stat) {
+                       brcmf_dbg(SDIO, "ctrl_frame timeout\n");
+                       bus->ctrl_frame_stat = false;
+                       ret = -ETIMEDOUT;
+               }
+               sdio_release_host(bus->sdiodev->func[1]);
+       }
+       if (!ret) {
                brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
                          bus->ctrl_frame_err);
+               rmb();
                ret = bus->ctrl_frame_err;
-       } else {
-               brcmf_dbg(SDIO, "ctrl_frame timeout\n");
-               bus->ctrl_frame_stat = false;
-               ret = -ETIMEDOUT;
        }
 
        if (ret)
@@ -3383,9 +3371,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
        sdio_claim_host(bus->sdiodev->func[1]);
        brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
 
-       /* Keep arm in reset */
-       brcmf_chip_enter_download(bus->ci);
-
        rstvec = get_unaligned_le32(fw->data);
        brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
 
@@ -3405,13 +3390,13 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
        }
 
        /* Take arm out of reset */
-       if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
+       if (!brcmf_chip_set_active(bus->ci, rstvec)) {
                brcmf_err("error getting out of ARM core reset\n");
                goto err;
        }
 
        /* Allow full data communication using DPC from now on. */
-       bus->sdiodev->state = BRCMF_STATE_DATA;
+       brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
        bcmerror = 0;
 
 err:
@@ -3548,6 +3533,14 @@ done:
        return err;
 }
 
+void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
+{
+       if (!bus->dpc_triggered) {
+               bus->dpc_triggered = true;
+               queue_work(bus->brcmf_wq, &bus->datawork);
+       }
+}
+
 void brcmf_sdio_isr(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
@@ -3557,7 +3550,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
                return;
        }
 
-       if (bus->sdiodev->state != BRCMF_STATE_DATA) {
+       if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
                brcmf_err("bus is down. we have nothing to do\n");
                return;
        }
@@ -3574,11 +3567,11 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
        if (!bus->intr)
                brcmf_err("isr w/o interrupt configured!\n");
 
-       atomic_inc(&bus->dpc_tskcnt);
+       bus->dpc_triggered = true;
        queue_work(bus->brcmf_wq, &bus->datawork);
 }
 
-static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
+static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TIMER, "Enter\n");
 
@@ -3594,7 +3587,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
                if (!bus->intr ||
                    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
 
-                       if (atomic_read(&bus->dpc_tskcnt) == 0) {
+                       if (!bus->dpc_triggered) {
                                u8 devpend;
 
                                sdio_claim_host(bus->sdiodev->func[1]);
@@ -3602,9 +3595,8 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
                                                            SDIO_CCCR_INTx,
                                                            NULL);
                                sdio_release_host(bus->sdiodev->func[1]);
-                               intstatus =
-                                   devpend & (INTR_STATUS_FUNC1 |
-                                              INTR_STATUS_FUNC2);
+                               intstatus = devpend & (INTR_STATUS_FUNC1 |
+                                                      INTR_STATUS_FUNC2);
                        }
 
                        /* If there is something, make like the ISR and
@@ -3613,7 +3605,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
                                bus->sdcnt.pollcnt++;
                                atomic_set(&bus->ipend, 1);
 
-                               atomic_inc(&bus->dpc_tskcnt);
+                               bus->dpc_triggered = true;
                                queue_work(bus->brcmf_wq, &bus->datawork);
                        }
                }
@@ -3623,7 +3615,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
        }
 #ifdef DEBUG
        /* Poll for console output periodically */
-       if (bus->sdiodev->state == BRCMF_STATE_DATA &&
+       if (bus->sdiodev->state == BRCMF_SDIOD_DATA &&
            bus->console_interval != 0) {
                bus->console.count += BRCMF_WD_POLL_MS;
                if (bus->console.count >= bus->console_interval) {
@@ -3640,22 +3632,25 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
 #endif                         /* DEBUG */
 
        /* On idle timeout clear activity flag and/or turn off clock */
-       if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
-               if (++bus->idlecount >= bus->idletime) {
-                       bus->idlecount = 0;
-                       if (bus->activity) {
-                               bus->activity = false;
-                               brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
-                       } else {
+       if (!bus->dpc_triggered) {
+               rmb();
+               if ((!bus->dpc_running) && (bus->idletime > 0) &&
+                   (bus->clkstate == CLK_AVAIL)) {
+                       bus->idlecount++;
+                       if (bus->idlecount > bus->idletime) {
                                brcmf_dbg(SDIO, "idle\n");
                                sdio_claim_host(bus->sdiodev->func[1]);
+                               brcmf_sdio_wd_timer(bus, 0);
+                               bus->idlecount = 0;
                                brcmf_sdio_bus_sleep(bus, true, false);
                                sdio_release_host(bus->sdiodev->func[1]);
                        }
+               } else {
+                       bus->idlecount = 0;
                }
+       } else {
+               bus->idlecount = 0;
        }
-
-       return (atomic_read(&bus->ipend) > 0);
 }
 
 static void brcmf_sdio_dataworker(struct work_struct *work)
@@ -3663,9 +3658,18 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
        struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
                                              datawork);
 
-       while (atomic_read(&bus->dpc_tskcnt)) {
-               atomic_set(&bus->dpc_tskcnt, 0);
+       bus->dpc_running = true;
+       wmb();
+       while (ACCESS_ONCE(bus->dpc_triggered)) {
+               bus->dpc_triggered = false;
                brcmf_sdio_dpc(bus);
+               bus->idlecount = 0;
+       }
+       bus->dpc_running = false;
+       if (brcmf_sdiod_freezing(bus->sdiodev)) {
+               brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
+               brcmf_sdiod_try_freeze(bus->sdiodev);
+               brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
        }
 }
 
@@ -3784,8 +3788,8 @@ static int brcmf_sdio_buscoreprep(void *ctx)
        return 0;
 }
 
-static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
-                                     u32 rstvec)
+static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
+                                       u32 rstvec)
 {
        struct brcmf_sdio_dev *sdiodev = ctx;
        struct brcmf_core *core;
@@ -3828,7 +3832,7 @@ static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
 
 static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
        .prepare = brcmf_sdio_buscoreprep,
-       .exit_dl = brcmf_sdio_buscore_exitdl,
+       .activate = brcmf_sdio_buscore_activate,
        .read32 = brcmf_sdio_buscore_read32,
        .write32 = brcmf_sdio_buscore_write32,
 };
@@ -3882,13 +3886,6 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
                drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
        brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
 
-       /* Get info on the SOCRAM cores... */
-       bus->ramsize = bus->ci->ramsize;
-       if (!(bus->ramsize)) {
-               brcmf_err("failed to find SOCRAM memory!\n");
-               goto fail;
-       }
-
        /* Set card control so an SDIO card reset does a WLAN backplane reset */
        reg_val = brcmf_sdiod_regrb(bus->sdiodev,
                                    SDIO_CCCR_BRCM_CARDCTRL, &err);
@@ -3944,13 +3941,19 @@ static int
 brcmf_sdio_watchdog_thread(void *data)
 {
        struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
+       int wait;
 
        allow_signal(SIGTERM);
        /* Run until signal received */
+       brcmf_sdiod_freezer_count(bus->sdiodev);
        while (1) {
                if (kthread_should_stop())
                        break;
-               if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
+               brcmf_sdiod_freezer_uncount(bus->sdiodev);
+               wait = wait_for_completion_interruptible(&bus->watchdog_wait);
+               brcmf_sdiod_freezer_count(bus->sdiodev);
+               brcmf_sdiod_try_freeze(bus->sdiodev);
+               if (!wait) {
                        brcmf_sdio_bus_watchdog(bus);
                        /* Count the tick for reference */
                        bus->sdcnt.tickcnt++;
@@ -3971,7 +3974,7 @@ brcmf_sdio_watchdog(unsigned long data)
                /* Reschedule the watchdog */
                if (bus->wd_timer_valid)
                        mod_timer(&bus->timer,
-                                 jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
+                                 jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
        }
 }
 
@@ -4089,6 +4092,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 {
        int ret;
        struct brcmf_sdio *bus;
+       struct workqueue_struct *wq;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -4117,12 +4121,16 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
                        bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
        }
 
-       INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
-       bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
-       if (bus->brcmf_wq == NULL) {
+       /* single-threaded workqueue */
+       wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
+                                    dev_name(&sdiodev->func[1]->dev));
+       if (!wq) {
                brcmf_err("insufficient memory to create txworkqueue\n");
                goto fail;
        }
+       brcmf_sdiod_freezer_count(sdiodev);
+       INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
+       bus->brcmf_wq = wq;
 
        /* attempt to attach to the dongle */
        if (!(brcmf_sdio_probe_attach(bus))) {
@@ -4143,13 +4151,15 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
        /* Initialize watchdog thread */
        init_completion(&bus->watchdog_wait);
        bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
-                                       bus, "brcmf_watchdog");
+                                       bus, "brcmf_wdog/%s",
+                                       dev_name(&sdiodev->func[1]->dev));
        if (IS_ERR(bus->watchdog_tsk)) {
                pr_warn("brcmf_watchdog thread failed to start\n");
                bus->watchdog_tsk = NULL;
        }
        /* Initialize DPC thread */
-       atomic_set(&bus->dpc_tskcnt, 0);
+       bus->dpc_triggered = false;
+       bus->dpc_running = false;
 
        /* Assign bus interface call back */
        bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
@@ -4242,16 +4252,16 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
                        destroy_workqueue(bus->brcmf_wq);
 
                if (bus->ci) {
-                       if (bus->sdiodev->state != BRCMF_STATE_NOMEDIUM) {
+                       if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
                                sdio_claim_host(bus->sdiodev->func[1]);
+                               brcmf_sdio_wd_timer(bus, 0);
                                brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
                                /* Leave the device in state where it is
-                                * 'quiet'. This is done by putting it in
-                                * download_state which essentially resets
-                                * all necessary cores.
+                                * 'passive'. This is done by resetting all
+                                * necessary cores.
                                 */
                                msleep(20);
-                               brcmf_chip_enter_download(bus->ci);
+                               brcmf_chip_set_passive(bus->ci);
                                brcmf_sdio_clkctl(bus, CLK_NONE, false);
                                sdio_release_host(bus->sdiodev->func[1]);
                        }
@@ -4277,7 +4287,7 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
        }
 
        /* don't start the wd until fw is loaded */
-       if (bus->sdiodev->state != BRCMF_STATE_DATA)
+       if (bus->sdiodev->state != BRCMF_SDIOD_DATA)
                return;
 
        if (wdtick) {
@@ -4290,16 +4300,28 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
                           dynamically changed or in the first instance
                         */
                        bus->timer.expires =
-                               jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
+                               jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS);
                        add_timer(&bus->timer);
 
                } else {
                        /* Re arm the timer, at last watchdog period */
                        mod_timer(&bus->timer,
-                               jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
+                               jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
                }
 
                bus->wd_timer_valid = true;
                bus->save_ms = wdtick;
        }
 }
+
+int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
+{
+       int ret;
+
+       sdio_claim_host(bus->sdiodev->func[1]);
+       ret = brcmf_sdio_bus_sleep(bus, sleep, false);
+       sdio_release_host(bus->sdiodev->func[1]);
+
+       return ret;
+}
+
index ec2586a8425cf31d4f84bc053afb0e4a2c3ab5ea..7328478b2d7bf5b67c88428e999baf6d69c3f3bd 100644 (file)
 /* watchdog polling interval in ms */
 #define BRCMF_WD_POLL_MS       10
 
-/* The state of the bus */
-enum brcmf_sdio_state {
-       BRCMF_STATE_DOWN,       /* Device available, still initialising */
-       BRCMF_STATE_DATA,       /* Ready for data transfers, DPC enabled */
-       BRCMF_STATE_NOMEDIUM    /* No medium access to dongle possible */
+/**
+ * enum brcmf_sdiod_state - the state of the bus.
+ *
+ * @BRCMF_SDIOD_DOWN: Device can be accessed, no DPC.
+ * @BRCMF_SDIOD_DATA: Ready for data transfers, DPC enabled.
+ * @BRCMF_SDIOD_NOMEDIUM: No medium access to dongle possible.
+ */
+enum brcmf_sdiod_state {
+       BRCMF_SDIOD_DOWN,
+       BRCMF_SDIOD_DATA,
+       BRCMF_SDIOD_NOMEDIUM
 };
 
 struct brcmf_sdreg {
@@ -169,15 +175,13 @@ struct brcmf_sdreg {
 };
 
 struct brcmf_sdio;
+struct brcmf_sdiod_freezer;
 
 struct brcmf_sdio_dev {
        struct sdio_func *func[SDIO_MAX_FUNCS];
        u8 num_funcs;                   /* Supported funcs on client */
        u32 sbwad;                      /* Save backplane window address */
        struct brcmf_sdio *bus;
-       atomic_t suspend;               /* suspend flag */
-       bool sleeping;
-       wait_queue_head_t idle_wait;
        struct device *dev;
        struct brcmf_bus *bus_if;
        struct brcmfmac_sdio_platform_data *pdata;
@@ -194,7 +198,8 @@ struct brcmf_sdio_dev {
        char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
        char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
        bool wowl_enabled;
-       enum brcmf_sdio_state state;
+       enum brcmf_sdiod_state state;
+       struct brcmf_sdiod_freezer *freezer;
 };
 
 /* sdio core registers */
@@ -337,6 +342,28 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 
 /* Issue an abort to the specified function */
 int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
+                             enum brcmf_sdiod_state state);
+#ifdef CONFIG_PM_SLEEP
+bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev);
+#else
+static inline bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
+{
+       return false;
+}
+static inline void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
+{
+}
+static inline void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
+{
+}
+static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
+{
+}
+#endif /* CONFIG_PM_SLEEP */
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdio_remove(struct brcmf_sdio *bus);
@@ -344,5 +371,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus);
 
 void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick);
 void brcmf_sdio_wowl_config(struct device *dev, bool enabled);
+int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep);
+void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus);
 
 #endif /* BRCMFMAC_SDIO_H */
index eb8584a9c49a84d28a5ec592ce943a6243e3029f..369527e2768956ee30563d8d330341c802f7bc6d 100644 (file)
@@ -4668,7 +4668,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
        brcms_c_coredisable(wlc_hw);
 
        /* Match driver "down" state */
-       bcma_core_pci_down(wlc_hw->d11core->bus);
+       bcma_host_pci_down(wlc_hw->d11core->bus);
 
        /* turn off pll and xtal to match driver "down" state */
        brcms_b_xtal(wlc_hw, OFF);
@@ -4959,7 +4959,7 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
         * Configure pci/pcmcia here instead of in brcms_c_attach()
         * to allow mfg hotswap:  down, hotswap (chip power cycle), up.
         */
-       bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci[0], wlc_hw->d11core,
+       bcma_host_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core,
                              true);
 
        /*
@@ -4969,12 +4969,12 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
         */
        if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
                /* put SB PCI in down state again */
-               bcma_core_pci_down(wlc_hw->d11core->bus);
+               bcma_host_pci_down(wlc_hw->d11core->bus);
                brcms_b_xtal(wlc_hw, OFF);
                return -ENOMEDIUM;
        }
 
-       bcma_core_pci_up(wlc_hw->d11core->bus);
+       bcma_host_pci_up(wlc_hw->d11core->bus);
 
        /* reset the d11 core */
        brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
@@ -5171,7 +5171,7 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
 
                /* turn off primary xtal and pll */
                if (!wlc_hw->noreset) {
-                       bcma_core_pci_down(wlc_hw->d11core->bus);
+                       bcma_host_pci_down(wlc_hw->d11core->bus);
                        brcms_b_xtal(wlc_hw, OFF);
                }
        }
index 084f18f4f95039c921b1c82c548c904fcb6ccf67..99dac9b8a082c0bfcccf2769ef30585ea3af4e2d 100644 (file)
@@ -23041,10 +23041,7 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
        else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G1_SEL)
                wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
                                     NPHY_RSSI_SEL_W1);
-       else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G2_SEL)
-               wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
-                                    NPHY_RSSI_SEL_W2);
-       else
+       else /* RADIO_2055_WBRSSI_G2_SEL */
                wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
                                     NPHY_RSSI_SEL_W2);
        if (rssi_ctrl_state[1] == RADIO_2055_NBRSSI_SEL)
@@ -23053,13 +23050,9 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
        else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G1_SEL)
                wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
                                     NPHY_RSSI_SEL_W1);
-       else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G2_SEL)
       else /* RADIO_2055_WBRSSI_G2_SEL */
                wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
                                     NPHY_RSSI_SEL_W2);
-       else
-               wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
-                                    NPHY_RSSI_SEL_W2);
-
        wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_OFF, rssi_type);
 
        write_phy_reg(pi, 0x91, rfctrlintc_state[0]);
index 2124a17d0bfdda603d34cf03a76cf550d29bf1be..4efdd51af9c8fd72ce99ea00e8bc03f6c86c271a 100644 (file)
@@ -37,6 +37,8 @@
 #define BRCM_CC_43362_CHIP_ID          43362
 #define BRCM_CC_4335_CHIP_ID           0x4335
 #define BRCM_CC_4339_CHIP_ID           0x4339
+#define BRCM_CC_43430_CHIP_ID          43430
+#define BRCM_CC_4345_CHIP_ID           0x4345
 #define BRCM_CC_4354_CHIP_ID           0x4354
 #define BRCM_CC_4356_CHIP_ID           0x4356
 #define BRCM_CC_43566_CHIP_ID          43566
index d242333b7559fca6de21e7f0b458770d149287df..e1fd499930a03a1cb0317c38de42813bea149275 100644 (file)
@@ -183,7 +183,14 @@ struct chipcregs {
        u8 uart1lsr;
        u8 uart1msr;
        u8 uart1scratch;
-       u32 PAD[126];
+       u32 PAD[62];
+
+       /* save/restore, corerev >= 48 */
+       u32 sr_capability;          /* 0x500 */
+       u32 sr_control0;            /* 0x504 */
+       u32 sr_control1;            /* 0x508 */
+       u32 gpio_control;           /* 0x50C */
+       u32 PAD[60];
 
        /* PMU registers (corerev >= 20) */
        u32 pmucontrol; /* 0x600 */
index 964b64ab7fe3a10aed8024f79abe6a23df6afb38..7603546d2de322cf8fb5dd5bf0d5ff3e48d8a6be 100644 (file)
@@ -447,7 +447,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
 }
 
 #ifdef CONFIG_PM
-static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
+static int cw1200_spi_suspend(struct device *dev)
 {
        struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));
 
@@ -458,10 +458,8 @@ static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
        return 0;
 }
 
-static int cw1200_spi_resume(struct device *dev)
-{
-       return 0;
-}
+static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL);
+
 #endif
 
 static struct spi_driver spi_driver = {
@@ -472,8 +470,7 @@ static struct spi_driver spi_driver = {
                .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
 #ifdef CONFIG_PM
-               .suspend        = cw1200_spi_suspend,
-               .resume         = cw1200_spi_resume,
+               .pm             = &cw1200_pm_ops,
 #endif
        },
 };
index 4a47c7f8a246dac5c2fd3770163341adbeeec73b..b0f65fa094287868f21802c89f395d99996a4f11 100644 (file)
@@ -293,7 +293,7 @@ void cw1200_remove_interface(struct ieee80211_hw *dev,
        }
        priv->vif = NULL;
        priv->mode = NL80211_IFTYPE_MONITOR;
-       memset(priv->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(priv->mac_addr);
        memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo));
        cw1200_free_keys(priv);
        cw1200_setup_mac(priv);
@@ -1240,8 +1240,8 @@ static void cw1200_do_join(struct cw1200_common *priv)
 
        bssid = priv->vif->bss_conf.bssid;
 
-       bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel,
-                       bssid, NULL, 0, 0, 0);
+       bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel, bssid, NULL, 0,
+                              IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
 
        if (!bss && !conf->ibss_joined) {
                wsm_unlock_tx(priv);
index 0bd541175ecda7d9277e4abd33b6b6b4148bdd8c..d28bd49cb5fd16132623630542788b448b2e8542 100644 (file)
@@ -1429,7 +1429,7 @@ void cw1200_link_id_gc_work(struct work_struct *work)
                                priv->link_id_map &= ~mask;
                                priv->sta_asleep_mask &= ~mask;
                                priv->pspoll_mask &= ~mask;
-                               memset(map_link.mac_addr, 0, ETH_ALEN);
+                               eth_zero_addr(map_link.mac_addr);
                                spin_unlock_bh(&priv->ps_state_lock);
                                reset.link_id = i + 1;
                                wsm_reset(priv, &reset);
index 8bde776894695effab540ae5ee7e0f2e3abee691..055e11d353caf4c679688f45c35e8f02de4bce42 100644 (file)
@@ -174,8 +174,8 @@ netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
                /* send broadcast and multicast frames to broadcast RA, if
                 * configured; otherwise, use unicast RA of the WDS link */
                if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
-                   skb->data[0] & 0x01)
-                       memset(&hdr.addr1, 0xff, ETH_ALEN);
+                   is_multicast_ether_addr(skb->data))
+                       eth_broadcast_addr(hdr.addr1);
                else if (iface->type == HOSTAP_INTERFACE_WDS)
                        memcpy(&hdr.addr1, iface->u.wds.remote_addr,
                               ETH_ALEN);
index fd8d83dd4f62ab09b22126c4b330af1ef9b571d9..c995ace153ee6ecd53dff68145543889f6bbb424 100644 (file)
@@ -309,7 +309,7 @@ void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
        int i;
 
        PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name);
-       memset(addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(addr);
 
        resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID);
 
@@ -1015,8 +1015,8 @@ static void prism2_send_mgmt(struct net_device *dev,
                memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */
        } else if (ieee80211_is_ctl(hdr->frame_control)) {
                /* control:ACK does not have addr2 or addr3 */
-               memset(hdr->addr2, 0, ETH_ALEN);
-               memset(hdr->addr3, 0, ETH_ALEN);
+               eth_zero_addr(hdr->addr2);
+               eth_zero_addr(hdr->addr3);
        } else {
                memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */
                memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */
@@ -1601,7 +1601,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb,
                memcpy(prev_ap, pos, ETH_ALEN);
                pos++; pos++; pos++; left -= 6;
        } else
-               memset(prev_ap, 0, ETH_ALEN);
+               eth_zero_addr(prev_ap);
 
        if (left >= 2) {
                unsigned int ileft;
index de7c4ffec3096b07ccaece961666b0bdde51ea27..7635ac4f6679625962d3003b6996c93c481b070f 100644 (file)
@@ -442,7 +442,7 @@ static void handle_info_queue_linkstatus(local_info_t *local)
        } else {
                netif_carrier_off(local->dev);
                netif_carrier_off(local->ddev);
-               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
        }
        wrqu.ap_addr.sa_family = ARPHRD_ETHER;
 
index 52919ad4272622aeb92d8b9d3d74daf1715a3d40..01de1a3bf94ef0965d03dfd9c6a23d5fd2f744b6 100644 (file)
@@ -224,7 +224,7 @@ int prism2_wds_del(local_info_t *local, u8 *remote_addr,
 
        if (selected) {
                if (do_not_remove)
-                       memset(selected->u.wds.remote_addr, 0, ETH_ALEN);
+                       eth_zero_addr(selected->u.wds.remote_addr);
                else {
                        hostap_remove_interface(selected->dev, rtnl_locked, 0);
                        local->wds_connections--;
@@ -798,7 +798,6 @@ static void prism2_tx_timeout(struct net_device *dev)
 
 const struct header_ops hostap_80211_ops = {
        .create         = eth_header,
-       .rebuild        = eth_rebuild_header,
        .cache          = eth_header_cache,
        .cache_update   = eth_header_cache_update,
        .parse          = hostap_80211_header_parse,
@@ -1088,7 +1087,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
 
        ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH,
                                   (u8 *) &val, 2);
-       memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+       eth_zero_addr(wrqu.ap_addr.sa_data);
        wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
        return ret;
 }
index 57904015380f05f28f5b555a8322caef59ab699d..ca25283e1c9201b566b09fcbe02a8aaa6c9c1811 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/interrupt.h>
 #include <linux/wireless.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/mutex.h>
 #include <net/iw_handler.h>
 #include <net/ieee80211_radiotap.h>
@@ -85,16 +86,16 @@ struct hfa384x_rx_frame {
        /* 802.11 */
        __le16 frame_control;
        __le16 duration_id;
-       u8 addr1[6];
-       u8 addr2[6];
-       u8 addr3[6];
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN];
+       u8 addr3[ETH_ALEN];
        __le16 seq_ctrl;
-       u8 addr4[6];
+       u8 addr4[ETH_ALEN];
        __le16 data_len;
 
        /* 802.3 */
-       u8 dst_addr[6];
-       u8 src_addr[6];
+       u8 dst_addr[ETH_ALEN];
+       u8 src_addr[ETH_ALEN];
        __be16 len;
 
        /* followed by frame data; max 2304 bytes */
@@ -114,16 +115,16 @@ struct hfa384x_tx_frame {
        /* 802.11 */
        __le16 frame_control; /* parts not used */
        __le16 duration_id;
-       u8 addr1[6];
-       u8 addr2[6]; /* filled by firmware */
-       u8 addr3[6];
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN]; /* filled by firmware */
+       u8 addr3[ETH_ALEN];
        __le16 seq_ctrl; /* filled by firmware */
-       u8 addr4[6];
+       u8 addr4[ETH_ALEN];
        __le16 data_len;
 
        /* 802.3 */
-       u8 dst_addr[6];
-       u8 src_addr[6];
+       u8 dst_addr[ETH_ALEN];
+       u8 src_addr[ETH_ALEN];
        __be16 len;
 
        /* followed by frame data; max 2304 bytes */
@@ -156,7 +157,7 @@ struct hfa384x_hostscan_request {
 } __packed;
 
 struct hfa384x_join_request {
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        __le16 channel;
 } __packed;
 
@@ -228,7 +229,7 @@ struct hfa384x_scan_result {
        __le16 chid;
        __le16 anl;
        __le16 sl;
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        __le16 beacon_interval;
        __le16 capability;
        __le16 ssid_len;
@@ -241,7 +242,7 @@ struct hfa384x_hostscan_result {
        __le16 chid;
        __le16 anl;
        __le16 sl;
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        __le16 beacon_interval;
        __le16 capability;
        __le16 ssid_len;
@@ -824,7 +825,7 @@ struct local_info {
 #define PRISM2_INFO_PENDING_SCANRESULTS 1
        int prev_link_status; /* previous received LinkStatus info */
        int prev_linkstatus_connected;
-       u8 preferred_ap[6]; /* use this AP if possible */
+       u8 preferred_ap[ETH_ALEN]; /* use this AP if possible */
 
 #ifdef PRISM2_CALLBACK
        void *callback_data; /* Can be used in callbacks; e.g., allocate
index 21de4fe6cf2d0ff87f46ef0b280d2a53f0999dc3..d6ec44d7a39129009693e9497e2c3e4f9c88fb4e 100644 (file)
@@ -66,7 +66,7 @@ config IPW2100_DEBUG
 config IPW2200
        tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
        depends on PCI && CFG80211
-       select CFG80211_WEXT
+       select CFG80211_WEXT_EXPORT
        select WIRELESS_EXT
        select WEXT_SPY
        select WEXT_PRIV
index 6fabea0309dd9a208d82be0451e07d24af45ab16..08eb229e7816010f11e702d679cb178b213362d2 100644 (file)
@@ -2147,8 +2147,8 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
                return;
        }
 
-       memset(priv->bssid, 0, ETH_ALEN);
-       memset(priv->ieee->bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->bssid);
+       eth_zero_addr(priv->ieee->bssid);
 
        netif_carrier_off(priv->net_dev);
        netif_stop_queue(priv->net_dev);
@@ -6956,7 +6956,7 @@ static int ipw2100_wx_get_wap(struct net_device *dev,
                wrqu->ap_addr.sa_family = ARPHRD_ETHER;
                memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
        } else
-               memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu->ap_addr.sa_data);
 
        IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data);
        return 0;
@@ -8300,7 +8300,7 @@ static void ipw2100_wx_event_work(struct work_struct *work)
            priv->status & STATUS_RF_KILL_MASK ||
            ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID,
                                &priv->bssid, &len)) {
-               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
        } else {
                /* We now have the BSSID, so can finish setting to the full
                 * associated state */
index 67cad9b05ad821fc720da095aced2a9fe72202b8..39f3e6f5cbcd230a49145d0bf0f589cc85d4abb7 100644 (file)
@@ -1964,7 +1964,7 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
        if (priv->status & STATUS_ASSOCIATED)
                memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
        else
-               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
        wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
 }
 
@@ -7400,7 +7400,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
        memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
 
        if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
-               memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
+               eth_broadcast_addr(priv->assoc_request.dest);
                priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
        } else {
                memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
@@ -8986,7 +8986,7 @@ static int ipw_wx_get_wap(struct net_device *dev,
                wrqu->ap_addr.sa_family = ARPHRD_ETHER;
                memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
        } else
-               memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu->ap_addr.sa_data);
 
        IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
                     wrqu->ap_addr.sa_data);
index eaaeea19d8c5bcc99d887ee7b3fd9c592b897045..bac60b2bc3f014a53668841e934f5062207f4080 100644 (file)
@@ -1678,7 +1678,7 @@ il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
                    lq_sta->total_success > lq_sta->max_success_limit ||
                    (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
                     flush_interval_passed)) {
-                       D_RATE("LQ: stay is expired %d %d %d\n:",
+                       D_RATE("LQ: stay is expired %d %d %d\n",
                               lq_sta->total_failed, lq_sta->total_success,
                               flush_interval_passed);
 
index 2c4fa49686ef1fdfad904b6450efff7151b18f18..887114582583b2e477cdad704b99f3a6e64bc742 100644 (file)
@@ -4634,7 +4634,7 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        il->vif = NULL;
        il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
        il_teardown_interface(il, vif);
-       memset(il->bssid, 0, ETH_ALEN);
+       eth_zero_addr(il->bssid);
 
        D_MAC80211("leave\n");
        mutex_unlock(&il->mutex);
index cceb026e0793b45fa418e7c023855d5c0bd23a02..5abd62ed8cb47da0c9dfe21de3592dc2482f6926 100644 (file)
@@ -1130,20 +1130,23 @@ done:
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
-                                    struct ieee80211_vif *vif,
-                                    enum ieee80211_rssi_event rssi_event)
+static void iwlagn_mac_event_callback(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif,
+                                     const struct ieee80211_event *event)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
+       if (event->type != RSSI_EVENT)
+               return;
+
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->mutex);
 
        if (priv->lib->bt_params &&
            priv->lib->bt_params->advanced_bt_coexist) {
-               if (rssi_event == RSSI_EVENT_LOW)
+               if (event->u.rssi.data == RSSI_EVENT_LOW)
                        priv->bt_enable_pspoll = true;
-               else if (rssi_event == RSSI_EVENT_HIGH)
+               else if (event->u.rssi.data == RSSI_EVENT_HIGH)
                        priv->bt_enable_pspoll = false;
 
                iwlagn_send_advance_bt_config(priv);
@@ -1614,7 +1617,7 @@ const struct ieee80211_ops iwlagn_hw_ops = {
        .channel_switch = iwlagn_mac_channel_switch,
        .flush = iwlagn_mac_flush,
        .tx_last_beacon = iwlagn_mac_tx_last_beacon,
-       .rssi_callback = iwlagn_mac_rssi_callback,
+       .event_callback = iwlagn_mac_event_callback,
        .set_tim = iwlagn_mac_set_tim,
 };
 
index c4d6dd7402d9066dd1fb171103f8cc22dc438eae..234e30f498b2dde18f3f355d5cfdcf13db6dc185 100644 (file)
@@ -1549,7 +1549,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
                                      table.blink1, table.blink2, table.ilink1,
                                      table.ilink2, table.bcon_time, table.gp1,
                                      table.gp2, table.gp3, table.ucode_ver,
-                                     table.hw_ver, table.brd_ver);
+                                     table.hw_ver, 0, table.brd_ver);
        IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
                desc_lookup(table.error_id));
        IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
index 32b78a66536db90bbb1e67f10e210df49f6b2d03..3bd7c86e90d9fca5c43c6a95ac7795ec74ddeaf7 100644 (file)
@@ -3153,12 +3153,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
        desc += sprintf(buff+desc, "lq type %s\n",
           (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
        if (is_Ht(tbl->lq_type)) {
-               desc += sprintf(buff+desc, " %s",
+               desc += sprintf(buff + desc, " %s",
                   (is_siso(tbl->lq_type)) ? "SISO" :
                   ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
-                  desc += sprintf(buff+desc, " %s",
+               desc += sprintf(buff + desc, " %s",
                   (tbl->is_ht40) ? "40MHz" : "20MHz");
-                  desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "",
+               desc += sprintf(buff + desc, " %s %s %s\n",
+                  (tbl->is_SGI) ? "SGI" : "",
                   (lq_sta->is_green) ? "GF enabled" : "",
                   (lq_sta->is_agg) ? "AGG on" : "");
        }
index 1e40a12de077237add85776ec6ad3c6fddf30857..275df12a6045044cdee1ddeba1c8840caf491923 100644 (file)
@@ -189,9 +189,9 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
                rate_flags |= RATE_MCS_CCK_MSK;
 
        /* Set up antennas */
-        if (priv->lib->bt_params &&
-            priv->lib->bt_params->advanced_bt_coexist &&
-            priv->bt_full_concurrent) {
+       if (priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist &&
+           priv->bt_full_concurrent) {
                /* operated as 1x1 in full concurrency mode */
                priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
                                first_antenna(priv->nvm_data->valid_tx_ant));
index 97e38d2e2983b757717be0dcac1aa3b5be262336..36e786f0387bd42593fe3c8ec523831694483bea 100644 (file)
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  12
-#define IWL3160_UCODE_API_MAX  12
+#define IWL7260_UCODE_API_MAX  13
+#define IWL3160_UCODE_API_MAX  13
 
 /* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK   10
-#define IWL3160_UCODE_API_OK   10
+#define IWL7260_UCODE_API_OK   12
+#define IWL3160_UCODE_API_OK   12
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN  9
-#define IWL3160_UCODE_API_MIN  9
+#define IWL7260_UCODE_API_MIN  10
+#define IWL3160_UCODE_API_MIN  10
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
index 2f7fe8167dc963259dfaea5469de47dc2bbad6f3..9c396a42aec8ba708fcab1e81b69567c23f055ba 100644 (file)
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  12
+#define IWL8000_UCODE_API_MAX  13
 
 /* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK   10
+#define IWL8000_UCODE_API_OK   12
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN  9
+#define IWL8000_UCODE_API_MIN  10
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION            0x0a1d
index 6842545535582246cf5369327c7748344ce98a97..9bb36d79c2bd3b6f32e7370db651533eb3bdad5c 100644 (file)
@@ -157,6 +157,7 @@ do {                                                                \
 /* 0x0000F000 - 0x00001000 */
 #define IWL_DL_ASSOC           0x00001000
 #define IWL_DL_DROP            0x00002000
+#define IWL_DL_LAR             0x00004000
 #define IWL_DL_COEX            0x00008000
 /* 0x000F0000 - 0x00010000 */
 #define IWL_DL_FW              0x00010000
@@ -219,5 +220,6 @@ do {                                                                \
 #define IWL_DEBUG_POWER(p, f, a...)    IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
 #define IWL_DEBUG_11H(p, f, a...)      IWL_DEBUG(p, IWL_DL_11H, f, ## a)
 #define IWL_DEBUG_RPM(p, f, a...)      IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
+#define IWL_DEBUG_LAR(p, f, a...)      IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
 
 #endif
index 78bd41bf34b0f04dac4d056d470ac30a80b212c8..53555a0fce56093071c1bbfb40d8cb5f5d300dd8 100644 (file)
@@ -431,11 +431,11 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
        TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low,
                 u32 data1, u32 data2, u32 line, u32 blink1,
                 u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
-                u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
+                u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver,
                 u32 brd_ver),
        TP_ARGS(dev, desc, tsf_low, data1, data2, line,
                blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
-               gp3, ucode_ver, hw_ver, brd_ver),
+               gp3, major, minor, hw_ver, brd_ver),
        TP_STRUCT__entry(
                DEV_ENTRY
                __field(u32, desc)
@@ -451,7 +451,8 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
                __field(u32, gp1)
                __field(u32, gp2)
                __field(u32, gp3)
-               __field(u32, ucode_ver)
+               __field(u32, major)
+               __field(u32, minor)
                __field(u32, hw_ver)
                __field(u32, brd_ver)
        ),
@@ -470,21 +471,22 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
                __entry->gp1 = gp1;
                __entry->gp2 = gp2;
                __entry->gp3 = gp3;
-               __entry->ucode_ver = ucode_ver;
+               __entry->major = major;
+               __entry->minor = minor;
                __entry->hw_ver = hw_ver;
                __entry->brd_ver = brd_ver;
        ),
        TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
                  "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
-                 "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X "
-                 "hw 0x%08X brd 0x%08X",
+                 "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X major 0x%08X "
+                 "minor 0x%08X hw 0x%08X brd 0x%08X",
                  __get_str(dev), __entry->desc, __entry->tsf_low,
                  __entry->data1,
                  __entry->data2, __entry->line, __entry->blink1,
                  __entry->blink2, __entry->ilink1, __entry->ilink2,
                  __entry->bcon_time, __entry->gp1, __entry->gp2,
-                 __entry->gp3, __entry->ucode_ver, __entry->hw_ver,
-                 __entry->brd_ver)
+                 __entry->gp3, __entry->major, __entry->minor,
+                 __entry->hw_ver, __entry->brd_ver)
 );
 
 TRACE_EVENT(iwlwifi_dev_ucode_event,
index c7154ac42c8c366d093cfb462a46d9ac98c01e02..aefdd9b7c1051f70abcf85597cf6966606f99b7f 100644 (file)
@@ -175,6 +175,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
        kfree(drv->fw.dbg_dest_tlv);
        for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
                kfree(drv->fw.dbg_conf_tlv[i]);
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
+               kfree(drv->fw.dbg_trigger_tlv[i]);
 
        for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
                iwl_free_fw_img(drv, drv->fw.img + i);
@@ -293,8 +295,10 @@ struct iwl_firmware_pieces {
 
        /* FW debug data parsed for driver usage */
        struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
-       struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
-       size_t dbg_conf_tlv_len[FW_DBG_MAX];
+       struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+       size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
+       struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+       size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
 };
 
 /*
@@ -842,6 +846,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        capa->n_scan_channels =
                                le32_to_cpup((__le32 *)tlv_data);
                        break;
+               case IWL_UCODE_TLV_FW_VERSION: {
+                       __le32 *ptr = (void *)tlv_data;
+                       u32 major, minor;
+                       u8 local_comp;
+
+                       if (tlv_len != sizeof(u32) * 3)
+                               goto invalid_tlv_len;
+
+                       major = le32_to_cpup(ptr++);
+                       minor = le32_to_cpup(ptr++);
+                       local_comp = le32_to_cpup(ptr);
+
+                       snprintf(drv->fw.fw_version,
+                                sizeof(drv->fw.fw_version), "%u.%u.%u",
+                                major, minor, local_comp);
+                       break;
+                       }
                case IWL_UCODE_TLV_FW_DBG_DEST: {
                        struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;
 
@@ -897,6 +918,31 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        pieces->dbg_conf_tlv_len[conf->id] = tlv_len;
                        break;
                        }
+               case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
+                       struct iwl_fw_dbg_trigger_tlv *trigger =
+                               (void *)tlv_data;
+                       u32 trigger_id = le32_to_cpu(trigger->id);
+
+                       if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) {
+                               IWL_ERR(drv,
+                                       "Skip unknown trigger: %u\n",
+                                       trigger->id);
+                               break;
+                       }
+
+                       if (pieces->dbg_trigger_tlv[trigger_id]) {
+                               IWL_ERR(drv,
+                                       "Ignore duplicate dbg trigger %u\n",
+                                       trigger->id);
+                               break;
+                       }
+
+                       IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id);
+
+                       pieces->dbg_trigger_tlv[trigger_id] = trigger;
+                       pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
+                       break;
+                       }
                case IWL_UCODE_TLV_SEC_RT_USNIFFER:
                        usniffer_images = true;
                        iwl_store_ucode_sec(pieces, tlv_data,
@@ -968,34 +1014,34 @@ static int validate_sec_sizes(struct iwl_drv *drv,
 
        /* Verify that uCode images will fit in card's SRAM. */
        if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
-                                                       cfg->max_inst_size) {
+           cfg->max_inst_size) {
                IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
                        get_sec_size(pieces, IWL_UCODE_REGULAR,
-                                               IWL_UCODE_SECTION_INST));
+                                    IWL_UCODE_SECTION_INST));
                return -1;
        }
 
        if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
-                                                       cfg->max_data_size) {
+           cfg->max_data_size) {
                IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
                        get_sec_size(pieces, IWL_UCODE_REGULAR,
-                                               IWL_UCODE_SECTION_DATA));
+                                    IWL_UCODE_SECTION_DATA));
                return -1;
        }
 
-        if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
-                                                       cfg->max_inst_size) {
+       if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
+            cfg->max_inst_size) {
                IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n",
                        get_sec_size(pieces, IWL_UCODE_INIT,
-                                               IWL_UCODE_SECTION_INST));
+                                    IWL_UCODE_SECTION_INST));
                return -1;
        }
 
        if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
-                                                       cfg->max_data_size) {
+           cfg->max_data_size) {
                IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n",
                        get_sec_size(pieces, IWL_UCODE_REGULAR,
-                                               IWL_UCODE_SECTION_DATA));
+                                    IWL_UCODE_SECTION_DATA));
                return -1;
        }
        return 0;
@@ -1107,7 +1153,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        if (err)
                goto try_again;
 
-       api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
+       if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)
+               api_ver = drv->fw.ucode_ver;
+       else
+               api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
 
        /*
         * api_ver should match the api version forming part of the
@@ -1178,6 +1227,19 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                }
        }
 
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
+               if (pieces->dbg_trigger_tlv[i]) {
+                       drv->fw.dbg_trigger_tlv_len[i] =
+                               pieces->dbg_trigger_tlv_len[i];
+                       drv->fw.dbg_trigger_tlv[i] =
+                               kmemdup(pieces->dbg_trigger_tlv[i],
+                                       drv->fw.dbg_trigger_tlv_len[i],
+                                       GFP_KERNEL);
+                       if (!drv->fw.dbg_trigger_tlv[i])
+                               goto out_free_fw;
+               }
+       }
+
        /* Now that we can no longer fail, copy information */
 
        /*
@@ -1485,6 +1547,10 @@ module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable,
                   bool, S_IRUGO);
 MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)");
 
+module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
+                  bool, S_IRUGO);
+MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
+
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
                   bool, S_IRUGO | S_IWUSR);
 #ifdef CONFIG_IWLWIFI_UAPSD
index adf522c756e61dbbe146fe463dc39241edb3762a..67a3a241b331cd08180bbfe888d7707ad7dc149b 100644 (file)
@@ -68,7 +68,7 @@
 
 /* for all modules */
 #define DRV_NAME        "iwlwifi"
-#define DRV_COPYRIGHT  "Copyright(c) 2003- 2014 Intel Corporation"
+#define DRV_COPYRIGHT  "Copyright(c) 2003- 2015 Intel Corporation"
 #define DRV_AUTHOR     "<ilw@linux.intel.com>"
 
 /* radio config bits (actual values from NVM definition) */
index f0548b8a64b072a1a58b80e6d6d5cc7b9d00df59..5234a0bf11e4e3286b740c22518f4a039e224e94 100644 (file)
@@ -94,6 +94,7 @@ struct iwl_nvm_data {
        u32 nvm_version;
        s8 max_tx_pwr_half_dbm;
 
+       bool lar_enabled;
        struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
        struct ieee80211_channel channels[];
 };
index 919a2548a92c5dca064258c437cdb8e59ade42db..37b38a585dd182f8fa1c44256f4a16ecc008d3df 100644 (file)
@@ -82,6 +82,8 @@
  *     sections like this in a single file.
  * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
  * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
+ * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
+ *     Structured as &struct iwl_fw_error_dump_trigger_desc.
  */
 enum iwl_fw_error_dump_type {
        /* 0 is deprecated */
@@ -94,6 +96,7 @@ enum iwl_fw_error_dump_type {
        IWL_FW_ERROR_DUMP_TXF = 7,
        IWL_FW_ERROR_DUMP_FH_REGS = 8,
        IWL_FW_ERROR_DUMP_MEM = 9,
+       IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
 
        IWL_FW_ERROR_DUMP_MAX,
 };
@@ -230,4 +233,47 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
        return (void *)(data->data + le32_to_cpu(data->len));
 }
 
+/**
+ * enum iwl_fw_dbg_trigger - triggers available
+ *
+ * @FW_DBG_TRIGGER_USER: trigger log collection by user
+ *     This should not be defined as a trigger to the driver, but a value the
+ *     driver should set to indicate that the trigger was initiated by the
+ *     user.
+ * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts
+ * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are
+ *     missed.
+ * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch.
+ * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a
+ *     command response or a notification.
+ * @FW_DB_TRIGGER_RESERVED: reserved
+ * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold.
+ * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon
+ *     goes below a threshold.
+ */
+enum iwl_fw_dbg_trigger {
+       FW_DBG_TRIGGER_INVALID = 0,
+       FW_DBG_TRIGGER_USER,
+       FW_DBG_TRIGGER_FW_ASSERT,
+       FW_DBG_TRIGGER_MISSED_BEACONS,
+       FW_DBG_TRIGGER_CHANNEL_SWITCH,
+       FW_DBG_TRIGGER_FW_NOTIF,
+       FW_DB_TRIGGER_RESERVED,
+       FW_DBG_TRIGGER_STATS,
+       FW_DBG_TRIGGER_RSSI,
+
+       /* must be last */
+       FW_DBG_TRIGGER_MAX,
+};
+
+/**
+ * struct iwl_fw_error_dump_trigger_desc - describes the trigger condition
+ * @type: %enum iwl_fw_dbg_trigger
+ * @data: raw data about what happened
+ */
+struct iwl_fw_error_dump_trigger_desc {
+       __le32 type;
+       u8 data[];
+};
+
 #endif /* __fw_error_dump_h__ */
index 016d913846818d2ea3731c1b4c0fae4b088f5363..291a3382aa3fc5596e243ff3a4e93d7cd44557ba 100644 (file)
@@ -66,6 +66,7 @@
 #define __iwl_fw_file_h__
 
 #include <linux/netdevice.h>
+#include <linux/nl80211.h>
 
 /* v1/v2 uCode file layout */
 struct iwl_ucode_header {
@@ -133,8 +134,10 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_N_SCAN_CHANNELS           = 31,
        IWL_UCODE_TLV_SEC_RT_USNIFFER   = 34,
        IWL_UCODE_TLV_SDIO_ADMA_ADDR    = 35,
+       IWL_UCODE_TLV_FW_VERSION        = 36,
        IWL_UCODE_TLV_FW_DBG_DEST       = 38,
        IWL_UCODE_TLV_FW_DBG_CONF       = 39,
+       IWL_UCODE_TLV_FW_DBG_TRIGGER    = 40,
 };
 
 struct iwl_ucode_tlv {
@@ -156,7 +159,8 @@ struct iwl_tlv_ucode_header {
        __le32 zero;
        __le32 magic;
        u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
-       __le32 ver;             /* major/minor/API/serial */
+       /* major/minor/API/serial or major in new format */
+       __le32 ver;
        __le32 build;
        __le64 ignore;
        /*
@@ -236,11 +240,9 @@ enum iwl_ucode_tlv_flag {
 /**
  * enum iwl_ucode_tlv_api - ucode api
  * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
- * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
- * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
- * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
  * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
  *     longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
  * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
  * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
  *     regardless of the band or the number of the probes. FW will calculate
@@ -250,19 +252,21 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
  * @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported.
  * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
+ * @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
+ * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_BT_COEX_SPLIT         = BIT(3),
-       IWL_UCODE_TLV_API_DISABLE_STA_TX        = BIT(5),
-       IWL_UCODE_TLV_API_LMAC_SCAN             = BIT(6),
-       IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF     = BIT(7),
        IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = BIT(8),
+       IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = BIT(9),
        IWL_UCODE_TLV_API_HDC_PHASE_0           = BIT(10),
        IWL_UCODE_TLV_API_BASIC_DWELL           = BIT(13),
        IWL_UCODE_TLV_API_SCD_CFG               = BIT(15),
        IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = BIT(16),
        IWL_UCODE_TLV_API_ASYNC_DTM             = BIT(17),
        IWL_UCODE_TLV_API_LQ_SS_PARAMS          = BIT(18),
+       IWL_UCODE_TLV_API_STATS_V10             = BIT(19),
+       IWL_UCODE_TLV_API_NEW_VERSION           = BIT(20),
 };
 
 /**
@@ -284,6 +288,9 @@ enum iwl_ucode_tlv_api {
  *     which also implies support for the scheduler configuration command
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
+ * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  */
 enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = BIT(0),
@@ -298,6 +305,9 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_DQA_SUPPORT                  = BIT(12),
        IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = BIT(13),
        IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = BIT(18),
+       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = BIT(22),
+       IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = BIT(28),
+       IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = BIT(30),
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -450,44 +460,129 @@ struct iwl_fw_dbg_conf_hcmd {
 } __packed;
 
 /**
- * struct iwl_fw_dbg_trigger - a TLV that describes a debug configuration
+ * enum iwl_fw_dbg_trigger_mode - triggers functionalities
  *
- * @enabled: is this trigger enabled
- * @reserved:
- * @len: length, in bytes, of the %trigger field
- * @trigger: pointer to a trigger struct
+ * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
+ * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
  */
-struct iwl_fw_dbg_trigger {
-       u8 enabled;
-       u8 reserved;
-       u8 len;
-       u8 trigger[0];
+enum iwl_fw_dbg_trigger_mode {
+       IWL_FW_DBG_TRIGGER_START = BIT(0),
+       IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+};
+
+/**
+ * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger
+ * @IWL_FW_DBG_CONF_VIF_ANY: any vif type
+ * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode
+ * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode
+ * @IWL_FW_DBG_CONF_VIF_AP: AP mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device
+ */
+enum iwl_fw_dbg_trigger_vif_type {
+       IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED,
+       IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC,
+       IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION,
+       IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP,
+       IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT,
+       IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO,
+       IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE,
+};
+
+/**
+ * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger
+ * @id: %enum iwl_fw_dbg_trigger
+ * @vif_type: %enum iwl_fw_dbg_trigger_vif_type
+ * @stop_conf_ids: bitmap of configurations this trigger relates to.
+ *     if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding
+ *     to the currently running configuration is set, the data should be
+ *     collected.
+ * @stop_delay: how many milliseconds to wait before collecting the data
+ *     after the STOP trigger fires.
+ * @mode: %enum iwl_fw_dbg_trigger_mode - can be stop / start of both
+ * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what
+ *     configuration should be applied when the triggers kicks in.
+ * @occurrences: number of occurrences. 0 means the trigger will never fire.
+ */
+struct iwl_fw_dbg_trigger_tlv {
+       __le32 id;
+       __le32 vif_type;
+       __le32 stop_conf_ids;
+       __le32 stop_delay;
+       u8 mode;
+       u8 start_conf_id;
+       __le16 occurrences;
+       __le32 reserved[2];
+
+       u8 data[0];
 } __packed;
 
+#define FW_DBG_START_FROM_ALIVE        0
+#define FW_DBG_CONF_MAX                32
+#define FW_DBG_INVALID         0xff
+
 /**
- * enum iwl_fw_dbg_conf - configurations available
- *
- * @FW_DBG_CUSTOM: take this configuration from alive
- *     Note that the trigger is NO-OP for this configuration
+ * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons
+ * @stop_consec_missed_bcon: stop recording if threshold is crossed.
+ * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed.
+ * @start_consec_missed_bcon: start recording if threshold is crossed.
+ * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed.
+ * @reserved1: reserved
+ * @reserved2: reserved
+ */
+struct iwl_fw_dbg_trigger_missed_bcon {
+       __le32 stop_consec_missed_bcon;
+       __le32 stop_consec_missed_bcon_since_rx;
+       __le32 reserved2[2];
+       __le32 start_consec_missed_bcon;
+       __le32 start_consec_missed_bcon_since_rx;
+       __le32 reserved1[2];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
+ * cmds: the list of commands to trigger the collection on
  */
-enum iwl_fw_dbg_conf {
-       FW_DBG_CUSTOM = 0,
+struct iwl_fw_dbg_trigger_cmd {
+       struct cmd {
+               u8 cmd_id;
+               u8 group_id;
+       } __packed cmds[16];
+} __packed;
 
-       /* must be last */
-       FW_DBG_MAX,
-       FW_DBG_INVALID = 0xff,
-};
+/**
+ * iwl_fw_dbg_trigger_stats - configures trigger for statistics
+ * @stop_offset: the offset of the value to be monitored
+ * @stop_threshold: the threshold above which to collect
+ * @start_offset: the offset of the value to be monitored
+ * @start_threshold: the threshold above which to start recording
+ */
+struct iwl_fw_dbg_trigger_stats {
+       __le32 stop_offset;
+       __le32 stop_threshold;
+       __le32 start_offset;
+       __le32 start_threshold;
+} __packed;
 
 /**
- * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration
- *
- * @id: %enum iwl_fw_dbg_conf
+ * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI
+ * @rssi: RSSI value to trigger at
+ */
+struct iwl_fw_dbg_trigger_low_rssi {
+       __le32 rssi;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
+ * @id: conf id
  * @usniffer: should the uSniffer image be used
  * @num_of_hcmds: how many HCMDs to send are present here
  * @hcmd: a variable length host command to be sent to apply the configuration.
  *     If there is more than one HCMD to send, they will appear one after the
  *     other and be sent in the order that they appear in.
- * This parses IWL_UCODE_TLV_FW_DBG_CONF
+ * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up-to
+ * %FW_DBG_CONF_MAX configuration per run.
  */
 struct iwl_fw_dbg_conf_tlv {
        u8 id;
@@ -495,8 +590,6 @@ struct iwl_fw_dbg_conf_tlv {
        u8 reserved;
        u8 num_of_hcmds;
        struct iwl_fw_dbg_conf_hcmd hcmd;
-
-       /* struct iwl_fw_dbg_trigger sits after all variable length hcmds */
 } __packed;
 
 #endif  /* __iwl_fw_file_h__ */
index ffd785cc67d6703a395b1809322de6a9f83f5f3d..cf75bafae51da0f60255d9a7f98f7d2f5e18bcac 100644 (file)
@@ -68,6 +68,7 @@
 #include <net/mac80211.h>
 
 #include "iwl-fw-file.h"
+#include "iwl-fw-error-dump.h"
 
 /**
  * enum iwl_ucode_type
@@ -157,6 +158,8 @@ struct iwl_fw_cscheme_list {
  * @dbg_dest_tlv: points to the destination TLV for debug
  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
  * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
+ * @dbg_trigger_tlv: array of pointers to triggers TLVs
+ * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
  */
 struct iwl_fw {
@@ -186,9 +189,10 @@ struct iwl_fw {
        u32 sdio_adma_addr;
 
        struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
-       struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
-       size_t dbg_conf_tlv_len[FW_DBG_MAX];
-
+       struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+       size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
+       struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+       size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
        u8 dbg_dest_reg_num;
 };
 
@@ -206,46 +210,29 @@ static inline const char *get_fw_dbg_mode_string(int mode)
        }
 }
 
-static inline const struct iwl_fw_dbg_trigger *
-iwl_fw_dbg_conf_get_trigger(const struct iwl_fw *fw, u8 id)
+static inline bool
+iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
 {
        const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
-       u8 *ptr;
-       int i;
 
        if (!conf_tlv)
-               return NULL;
-
-       ptr = (void *)&conf_tlv->hcmd;
-       for (i = 0; i < conf_tlv->num_of_hcmds; i++) {
-               ptr += sizeof(conf_tlv->hcmd);
-               ptr += le16_to_cpu(conf_tlv->hcmd.len);
-       }
-
-       return (const struct iwl_fw_dbg_trigger *)ptr;
-}
-
-static inline bool
-iwl_fw_dbg_conf_enabled(const struct iwl_fw *fw, u8 id)
-{
-       const struct iwl_fw_dbg_trigger *trigger =
-               iwl_fw_dbg_conf_get_trigger(fw, id);
-
-       if (!trigger)
                return false;
 
-       return trigger->enabled;
+       return conf_tlv->usniffer;
 }
 
-static inline bool
-iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
-{
-       const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+#define iwl_fw_dbg_trigger_enabled(fw, id) ({                  \
+       void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)];      \
+       unlikely(__dbg_trigger);                                \
+})
 
-       if (!conf_tlv)
-               return false;
+static inline struct iwl_fw_dbg_trigger_tlv*
+iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, u8 id)
+{
+       if (WARN_ON(id >= ARRAY_SIZE(fw->dbg_trigger_tlv)))
+               return NULL;
 
-       return conf_tlv->usniffer;
+       return fw->dbg_trigger_tlv[id];
 }
 
 #endif  /* __iwl_fw_h__ */
index 03250a45272eba7185851d74bc90d6a6863d664d..78cac43e2bcd163997006065d112b1a61a6f6643 100644 (file)
@@ -201,6 +201,8 @@ void iwl_force_nmi(struct iwl_trans *trans)
        } else {
                iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
                               DEVICE_SET_NMI_8000B_VAL);
+               iwl_write_prph(trans, DEVICE_SET_NMI_REG,
+                              DEVICE_SET_NMI_VAL_DRV);
        }
 }
 IWL_EXPORT_SYMBOL(iwl_force_nmi);
index e8eabd21ccfefa1083c36bbcbe1b26b7b858d3ff..ac2b90df841316134e85c6cc8fb5b205eb679ee9 100644 (file)
@@ -103,6 +103,7 @@ enum iwl_disable_11n {
  * @debug_level: levels are IWL_DL_*
  * @ant_coupling: antenna coupling in dB, default = 0
  * @d0i3_disable: disable d0i3, default = 1,
+ * @lar_disable: disable LAR (regulatory), default = 0
  * @fw_monitor: allow to use firmware monitor
  */
 struct iwl_mod_params {
@@ -121,6 +122,7 @@ struct iwl_mod_params {
        char *nvm_file;
        bool uapsd_disable;
        bool d0i3_disable;
+       bool lar_disable;
        bool fw_monitor;
 };
 
index c74f1a4edf2367ba558dc97c9f744b49b91ba42f..774637746427ccce154a236b186fa7c79ffdc514 100644 (file)
@@ -103,8 +103,16 @@ enum family_8000_nvm_offsets {
        SKU_FAMILY_8000 = 4,
        N_HW_ADDRS_FAMILY_8000 = 5,
 
+       /* NVM PHY-SKU-Section offset (in words) for B0 */
+       RADIO_CFG_FAMILY_8000_B0 = 0,
+       SKU_FAMILY_8000_B0 = 2,
+       N_HW_ADDRS_FAMILY_8000_B0 = 3,
+
        /* NVM REGULATORY -Section offset (in words) definitions */
        NVM_CHANNELS_FAMILY_8000 = 0,
+       NVM_LAR_OFFSET_FAMILY_8000_OLD = 0x4C7,
+       NVM_LAR_OFFSET_FAMILY_8000 = 0x507,
+       NVM_LAR_ENABLED_FAMILY_8000 = 0x7,
 
        /* NVM calibration section offset (in words) definitions */
        NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
@@ -146,7 +154,9 @@ static const u8 iwl_nvm_channels_family_8000[] = {
 #define NUM_2GHZ_CHANNELS_FAMILY_8000  14
 #define FIRST_2GHZ_HT_MINUS            5
 #define LAST_2GHZ_HT_PLUS              9
-#define LAST_5GHZ_HT                   161
+#define LAST_5GHZ_HT                   165
+#define LAST_5GHZ_HT_FAMILY_8000       181
+#define N_HW_ADDR_MASK                 0xF
 
 /* rate data (static) */
 static struct ieee80211_rate iwl_cfg80211_rates[] = {
@@ -201,9 +211,57 @@ enum iwl_nvm_channel_flags {
 #define CHECK_AND_PRINT_I(x)   \
        ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
 
+static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
+                                u16 nvm_flags, const struct iwl_cfg *cfg)
+{
+       u32 flags = IEEE80211_CHAN_NO_HT40;
+       u32 last_5ghz_ht = LAST_5GHZ_HT;
+
+       if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
+
+       if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+               if (ch_num <= LAST_2GHZ_HT_PLUS)
+                       flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+               if (ch_num >= FIRST_2GHZ_HT_MINUS)
+                       flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+       } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+               if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+                       flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+               else
+                       flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+       }
+       if (!(nvm_flags & NVM_CHANNEL_80MHZ))
+               flags |= IEEE80211_CHAN_NO_80MHZ;
+       if (!(nvm_flags & NVM_CHANNEL_160MHZ))
+               flags |= IEEE80211_CHAN_NO_160MHZ;
+
+       if (!(nvm_flags & NVM_CHANNEL_IBSS))
+               flags |= IEEE80211_CHAN_NO_IR;
+
+       if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
+               flags |= IEEE80211_CHAN_NO_IR;
+
+       if (nvm_flags & NVM_CHANNEL_RADAR)
+               flags |= IEEE80211_CHAN_RADAR;
+
+       if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
+               flags |= IEEE80211_CHAN_INDOOR_ONLY;
+
+       /* Set the GO concurrent flag only in case that NO_IR is set.
+        * Otherwise it is meaningless
+        */
+       if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
+           (flags & IEEE80211_CHAN_NO_IR))
+               flags |= IEEE80211_CHAN_GO_CONCURRENT;
+
+       return flags;
+}
+
 static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                                struct iwl_nvm_data *data,
-                               const __le16 * const nvm_ch_flags)
+                               const __le16 * const nvm_ch_flags,
+                               bool lar_supported)
 {
        int ch_idx;
        int n_channels = 0;
@@ -228,9 +286,14 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 
                if (ch_idx >= num_2ghz_channels &&
                    !data->sku_cap_band_52GHz_enable)
-                       ch_flags &= ~NVM_CHANNEL_VALID;
+                       continue;
 
-               if (!(ch_flags & NVM_CHANNEL_VALID)) {
+               if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
+                       /*
+                        * Channels might become valid later if lar is
+                        * supported, hence we still want to add them to
+                        * the list of supported channels to cfg80211.
+                        */
                        IWL_DEBUG_EEPROM(dev,
                                         "Ch. %d Flags %x [%sGHz] - No traffic\n",
                                         nvm_chan[ch_idx],
@@ -250,45 +313,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                        ieee80211_channel_to_frequency(
                                channel->hw_value, channel->band);
 
-               /* TODO: Need to be dependent to the NVM */
-               channel->flags = IEEE80211_CHAN_NO_HT40;
-               if (ch_idx < num_2ghz_channels &&
-                   (ch_flags & NVM_CHANNEL_40MHZ)) {
-                       if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
-                               channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
-                       if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
-                               channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
-               } else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
-                          (ch_flags & NVM_CHANNEL_40MHZ)) {
-                       if ((ch_idx - num_2ghz_channels) % 2 == 0)
-                               channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
-                       else
-                               channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
-               }
-               if (!(ch_flags & NVM_CHANNEL_80MHZ))
-                       channel->flags |= IEEE80211_CHAN_NO_80MHZ;
-               if (!(ch_flags & NVM_CHANNEL_160MHZ))
-                       channel->flags |= IEEE80211_CHAN_NO_160MHZ;
-
-               if (!(ch_flags & NVM_CHANNEL_IBSS))
-                       channel->flags |= IEEE80211_CHAN_NO_IR;
-
-               if (!(ch_flags & NVM_CHANNEL_ACTIVE))
-                       channel->flags |= IEEE80211_CHAN_NO_IR;
-
-               if (ch_flags & NVM_CHANNEL_RADAR)
-                       channel->flags |= IEEE80211_CHAN_RADAR;
-
-               if (ch_flags & NVM_CHANNEL_INDOOR_ONLY)
-                       channel->flags |= IEEE80211_CHAN_INDOOR_ONLY;
-
-               /* Set the GO concurrent flag only in case that NO_IR is set.
-                * Otherwise it is meaningless
-                */
-               if ((ch_flags & NVM_CHANNEL_GO_CONCURRENT) &&
-                   (channel->flags & IEEE80211_CHAN_NO_IR))
-                       channel->flags |= IEEE80211_CHAN_GO_CONCURRENT;
-
                /* Initialize regulatory-based run-time data */
 
                /*
@@ -297,6 +321,15 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                 */
                channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
                is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
+
+               /* don't put limitations in case we're using LAR */
+               if (!lar_supported)
+                       channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
+                                                              ch_idx, is_5ghz,
+                                                              ch_flags, cfg);
+               else
+                       channel->flags = 0;
+
                IWL_DEBUG_EEPROM(dev,
                                 "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
                                 channel->hw_value,
@@ -370,8 +403,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
 
 static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
                            struct iwl_nvm_data *data,
-                           const __le16 *ch_section, bool enable_vht,
-                           u8 tx_chains, u8 rx_chains)
+                           const __le16 *ch_section,
+                           u8 tx_chains, u8 rx_chains, bool lar_supported)
 {
        int n_channels;
        int n_used = 0;
@@ -380,11 +413,12 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                n_channels = iwl_init_channel_map(
                                dev, cfg, data,
-                               &ch_section[NVM_CHANNELS]);
+                               &ch_section[NVM_CHANNELS], lar_supported);
        else
                n_channels = iwl_init_channel_map(
                                dev, cfg, data,
-                               &ch_section[NVM_CHANNELS_FAMILY_8000]);
+                               &ch_section[NVM_CHANNELS_FAMILY_8000],
+                               lar_supported);
 
        sband = &data->bands[IEEE80211_BAND_2GHZ];
        sband->band = IEEE80211_BAND_2GHZ;
@@ -403,7 +437,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
                                          IEEE80211_BAND_5GHZ);
        iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
                             tx_chains, rx_chains);
-       if (enable_vht)
+       if (data->sku_cap_11ac_enable)
                iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
                                      tx_chains, rx_chains);
 
@@ -413,10 +447,15 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 }
 
 static int iwl_get_sku(const struct iwl_cfg *cfg,
-                      const __le16 *nvm_sw)
+                      const __le16 *nvm_sw, const __le16 *phy_sku,
+                      bool is_family_8000_a_step)
 {
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return le16_to_cpup(nvm_sw + SKU);
+
+       if (!is_family_8000_a_step)
+               return le32_to_cpup((__le32 *)(phy_sku +
+                                              SKU_FAMILY_8000_B0));
        else
                return le32_to_cpup((__le32 *)(nvm_sw + SKU_FAMILY_8000));
 }
@@ -432,23 +471,36 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg,
 }
 
 static int iwl_get_radio_cfg(const struct iwl_cfg *cfg,
-                            const __le16 *nvm_sw)
+                            const __le16 *nvm_sw, const __le16 *phy_sku,
+                            bool is_family_8000_a_step)
 {
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return le16_to_cpup(nvm_sw + RADIO_CFG);
+
+       if (!is_family_8000_a_step)
+               return le32_to_cpup((__le32 *)(phy_sku +
+                                              RADIO_CFG_FAMILY_8000_B0));
        else
                return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+
 }
 
-#define N_HW_ADDRS_MASK_FAMILY_8000    0xF
 static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg,
-                             const __le16 *nvm_sw)
+                             const __le16 *nvm_sw, bool is_family_8000_a_step)
 {
+       int n_hw_addr;
+
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return le16_to_cpup(nvm_sw + N_HW_ADDRS);
+
+       if (!is_family_8000_a_step)
+               n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw +
+                                                   N_HW_ADDRS_FAMILY_8000_B0));
        else
-               return le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000))
-                      & N_HW_ADDRS_MASK_FAMILY_8000;
+               n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw +
+                                                   N_HW_ADDRS_FAMILY_8000));
+
+       return n_hw_addr & N_HW_ADDR_MASK;
 }
 
 static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
@@ -491,7 +543,8 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                                           const struct iwl_cfg *cfg,
                                           struct iwl_nvm_data *data,
                                           const __le16 *mac_override,
-                                          const __le16 *nvm_hw)
+                                          const __le16 *nvm_hw,
+                                          u32 mac_addr0, u32 mac_addr1)
 {
        const u8 *hw_addr;
 
@@ -515,48 +568,17 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
        }
 
        if (nvm_hw) {
-               /* read the MAC address from OTP */
-               if (!dev_is_pci(dev) || (data->nvm_version < 0xE08)) {
-                       /* read the mac address from the WFPM location */
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR0_WFPM_FAMILY_8000);
-                       data->hw_addr[0] = hw_addr[3];
-                       data->hw_addr[1] = hw_addr[2];
-                       data->hw_addr[2] = hw_addr[1];
-                       data->hw_addr[3] = hw_addr[0];
-
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR1_WFPM_FAMILY_8000);
-                       data->hw_addr[4] = hw_addr[1];
-                       data->hw_addr[5] = hw_addr[0];
-               } else if ((data->nvm_version >= 0xE08) &&
-                          (data->nvm_version < 0xE0B)) {
-                       /* read "reverse order"  from the PCIe location */
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR0_PCIE_FAMILY_8000);
-                       data->hw_addr[5] = hw_addr[2];
-                       data->hw_addr[4] = hw_addr[1];
-                       data->hw_addr[3] = hw_addr[0];
-
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR1_PCIE_FAMILY_8000);
-                       data->hw_addr[2] = hw_addr[3];
-                       data->hw_addr[1] = hw_addr[2];
-                       data->hw_addr[0] = hw_addr[1];
-               } else {
-                       /* read from the PCIe location */
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR0_PCIE_FAMILY_8000);
-                       data->hw_addr[5] = hw_addr[0];
-                       data->hw_addr[4] = hw_addr[1];
-                       data->hw_addr[3] = hw_addr[2];
-
-                       hw_addr = (const u8 *)(nvm_hw +
-                                              HW_ADDR1_PCIE_FAMILY_8000);
-                       data->hw_addr[2] = hw_addr[1];
-                       data->hw_addr[1] = hw_addr[2];
-                       data->hw_addr[0] = hw_addr[3];
-               }
+               /* read the MAC address from HW resisters */
+               hw_addr = (const u8 *)&mac_addr0;
+               data->hw_addr[0] = hw_addr[3];
+               data->hw_addr[1] = hw_addr[2];
+               data->hw_addr[2] = hw_addr[1];
+               data->hw_addr[3] = hw_addr[0];
+
+               hw_addr = (const u8 *)&mac_addr1;
+               data->hw_addr[4] = hw_addr[1];
+               data->hw_addr[5] = hw_addr[0];
+
                if (!is_valid_ether_addr(data->hw_addr))
                        IWL_ERR_DEV(dev,
                                    "mac address from hw section is not valid\n");
@@ -571,11 +593,15 @@ struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
-                  const __le16 *mac_override, u8 tx_chains, u8 rx_chains)
+                  const __le16 *mac_override, const __le16 *phy_sku,
+                  u8 tx_chains, u8 rx_chains,
+                  bool lar_fw_supported, bool is_family_8000_a_step,
+                  u32 mac_addr0, u32 mac_addr1)
 {
        struct iwl_nvm_data *data;
        u32 sku;
        u32 radio_cfg;
+       u16 lar_config;
 
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                data = kzalloc(sizeof(*data) +
@@ -592,22 +618,25 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
 
        data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
 
-       radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw);
+       radio_cfg =
+               iwl_get_radio_cfg(cfg, nvm_sw, phy_sku, is_family_8000_a_step);
        iwl_set_radio_cfg(cfg, data, radio_cfg);
        if (data->valid_tx_ant)
                tx_chains &= data->valid_tx_ant;
        if (data->valid_rx_ant)
                rx_chains &= data->valid_rx_ant;
 
-       sku = iwl_get_sku(cfg, nvm_sw);
+       sku = iwl_get_sku(cfg, nvm_sw, phy_sku, is_family_8000_a_step);
        data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
        data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
        data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
-       data->sku_cap_11ac_enable = sku & NVM_SKU_CAP_11AC_ENABLE;
        if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
                data->sku_cap_11n_enable = false;
+       data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
+                                   (sku & NVM_SKU_CAP_11AC_ENABLE);
 
-       data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
+       data->n_hw_addrs =
+               iwl_get_n_hw_addrs(cfg, nvm_sw, is_family_8000_a_step);
 
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
                /* Checking for required sections */
@@ -626,16 +655,23 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                iwl_set_hw_address(cfg, data, nvm_hw);
 
                iwl_init_sbands(dev, cfg, data, nvm_sw,
-                               sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
-                               rx_chains);
+                               tx_chains, rx_chains, lar_fw_supported);
        } else {
+               u16 lar_offset = data->nvm_version < 0xE39 ?
+                                NVM_LAR_OFFSET_FAMILY_8000_OLD :
+                                NVM_LAR_OFFSET_FAMILY_8000;
+
+               lar_config = le16_to_cpup(regulatory + lar_offset);
+               data->lar_enabled = !!(lar_config &
+                                      NVM_LAR_ENABLED_FAMILY_8000);
+
                /* MAC address in family 8000 */
                iwl_set_hw_address_family_8000(dev, cfg, data, mac_override,
-                                              nvm_hw);
+                                              nvm_hw, mac_addr0, mac_addr1);
 
                iwl_init_sbands(dev, cfg, data, regulatory,
-                               sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
-                               rx_chains);
+                               tx_chains, rx_chains,
+                               lar_fw_supported && data->lar_enabled);
        }
 
        data->calib_version = 255;
@@ -643,3 +679,164 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
        return data;
 }
 IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
+
+static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
+                                      int ch_idx, u16 nvm_flags,
+                                      const struct iwl_cfg *cfg)
+{
+       u32 flags = NL80211_RRF_NO_HT40;
+       u32 last_5ghz_ht = LAST_5GHZ_HT;
+
+       if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
+
+       if (ch_idx < NUM_2GHZ_CHANNELS &&
+           (nvm_flags & NVM_CHANNEL_40MHZ)) {
+               if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
+                       flags &= ~NL80211_RRF_NO_HT40PLUS;
+               if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
+                       flags &= ~NL80211_RRF_NO_HT40MINUS;
+       } else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
+                  (nvm_flags & NVM_CHANNEL_40MHZ)) {
+               if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+                       flags &= ~NL80211_RRF_NO_HT40PLUS;
+               else
+                       flags &= ~NL80211_RRF_NO_HT40MINUS;
+       }
+
+       if (!(nvm_flags & NVM_CHANNEL_80MHZ))
+               flags |= NL80211_RRF_NO_80MHZ;
+       if (!(nvm_flags & NVM_CHANNEL_160MHZ))
+               flags |= NL80211_RRF_NO_160MHZ;
+
+       if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
+               flags |= NL80211_RRF_NO_IR;
+
+       if (nvm_flags & NVM_CHANNEL_RADAR)
+               flags |= NL80211_RRF_DFS;
+
+       if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
+               flags |= NL80211_RRF_NO_OUTDOOR;
+
+       /* Set the GO concurrent flag only in case that NO_IR is set.
+        * Otherwise it is meaningless
+        */
+       if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
+           (flags & NL80211_RRF_NO_IR))
+               flags |= NL80211_RRF_GO_CONCURRENT;
+
+       return flags;
+}
+
+struct ieee80211_regdomain *
+iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+                      int num_of_ch, __le32 *channels, u16 fw_mcc)
+{
+       int ch_idx;
+       u16 ch_flags, prev_ch_flags = 0;
+       const u8 *nvm_chan = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
+                            iwl_nvm_channels_family_8000 : iwl_nvm_channels;
+       struct ieee80211_regdomain *regd;
+       int size_of_regd;
+       struct ieee80211_reg_rule *rule;
+       enum ieee80211_band band;
+       int center_freq, prev_center_freq = 0;
+       int valid_rules = 0;
+       bool new_rule;
+       int max_num_ch = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
+                        IWL_NUM_CHANNELS_FAMILY_8000 : IWL_NUM_CHANNELS;
+
+       if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
+               return ERR_PTR(-EINVAL);
+
+       if (WARN_ON(num_of_ch > max_num_ch))
+               num_of_ch = max_num_ch;
+
+       IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
+                     num_of_ch);
+
+       /* build a regdomain rule for every valid channel */
+       size_of_regd =
+               sizeof(struct ieee80211_regdomain) +
+               num_of_ch * sizeof(struct ieee80211_reg_rule);
+
+       regd = kzalloc(size_of_regd, GFP_KERNEL);
+       if (!regd)
+               return ERR_PTR(-ENOMEM);
+
+       for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+               ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
+               band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+                      IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+               center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
+                                                            band);
+               new_rule = false;
+
+               if (!(ch_flags & NVM_CHANNEL_VALID)) {
+                       IWL_DEBUG_DEV(dev, IWL_DL_LAR,
+                                     "Ch. %d Flags %x [%sGHz] - No traffic\n",
+                                     nvm_chan[ch_idx],
+                                     ch_flags,
+                                     (ch_idx >= NUM_2GHZ_CHANNELS) ?
+                                     "5.2" : "2.4");
+                       continue;
+               }
+
+               /* we can't continue the same rule */
+               if (ch_idx == 0 || prev_ch_flags != ch_flags ||
+                   center_freq - prev_center_freq > 20) {
+                       valid_rules++;
+                       new_rule = true;
+               }
+
+               rule = &regd->reg_rules[valid_rules - 1];
+
+               if (new_rule)
+                       rule->freq_range.start_freq_khz =
+                                               MHZ_TO_KHZ(center_freq - 10);
+
+               rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);
+
+               /* this doesn't matter - not used by FW */
+               rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
+               rule->power_rule.max_eirp =
+                       DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
+
+               rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
+                                                         ch_flags, cfg);
+
+               /* rely on auto-calculation to merge BW of contiguous chans */
+               rule->flags |= NL80211_RRF_AUTO_BW;
+               rule->freq_range.max_bandwidth_khz = 0;
+
+               prev_ch_flags = ch_flags;
+               prev_center_freq = center_freq;
+
+               IWL_DEBUG_DEV(dev, IWL_DL_LAR,
+                             "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
+                             center_freq,
+                             band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+                             CHECK_AND_PRINT_I(VALID),
+                             CHECK_AND_PRINT_I(ACTIVE),
+                             CHECK_AND_PRINT_I(RADAR),
+                             CHECK_AND_PRINT_I(WIDE),
+                             CHECK_AND_PRINT_I(40MHZ),
+                             CHECK_AND_PRINT_I(80MHZ),
+                             CHECK_AND_PRINT_I(160MHZ),
+                             CHECK_AND_PRINT_I(INDOOR_ONLY),
+                             CHECK_AND_PRINT_I(GO_CONCURRENT),
+                             ch_flags,
+                             ((ch_flags & NVM_CHANNEL_ACTIVE) &&
+                              !(ch_flags & NVM_CHANNEL_RADAR))
+                                        ? "" : "not ");
+       }
+
+       regd->n_reg_rules = valid_rules;
+
+       /* set alpha2 from FW. */
+       regd->alpha2[0] = fw_mcc >> 8;
+       regd->alpha2[1] = fw_mcc & 0xff;
+
+       return regd;
+}
+IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
index c9c45a39d212c2516433bdce81c74373460383ab..c995d2cee3f691c6dbe2c3592856d155d4e98f90 100644 (file)
@@ -62,6 +62,7 @@
 #ifndef __iwl_nvm_parse_h__
 #define __iwl_nvm_parse_h__
 
+#include <net/cfg80211.h>
 #include "iwl-eeprom-parse.h"
 
 /**
@@ -76,6 +77,22 @@ struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
-                  const __le16 *mac_override, u8 tx_chains, u8 rx_chains);
+                  const __le16 *mac_override, const __le16 *phy_sku,
+                  u8 tx_chains, u8 rx_chains,
+                  bool lar_fw_supported, bool is_family_8000_a_step,
+                  u32 mac_addr0, u32 mac_addr1);
+
+/**
+ * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
+ *
+ * This function parses the regulatory channel data received as a
+ * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain,
+ * to be fed into the regulatory core. An ERR_PTR is returned on error.
+ * If not given to the regulatory core, the user is responsible for freeing
+ * the regdomain returned here with kfree.
+ */
+struct ieee80211_regdomain *
+iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+                      int num_of_ch, __le32 *channels, u16 fw_mcc);
 
 #endif /* __iwl_nvm_parse_h__ */
index d4fb5cad07ea1d36c508c25ec83450a2e80e3807..e893c6eb260cd4866b7c7d03c1437b9d479ad9de 100644 (file)
@@ -72,7 +72,7 @@
 #include "iwl-trans.h"
 
 #define CHANNEL_NUM_SIZE       4       /* num of channels in calib_ch size */
-#define IWL_NUM_PAPD_CH_GROUPS 7
+#define IWL_NUM_PAPD_CH_GROUPS 9
 #define IWL_NUM_TXP_CH_GROUPS  9
 
 struct iwl_phy_db_entry {
index 6221e4dfc64fcc0ee907d1dcec4e29394bb4933f..bc962888c5836435450882a0d535c8d9534d14e7 100644 (file)
@@ -370,7 +370,33 @@ enum secure_load_status_reg {
 #define MON_BUFF_CYCLE_CNT             (0xa03c48)
 
 #define DBGC_IN_SAMPLE                 (0xa03c00)
-#define DBGC_OUT_CTRL                  (0xa03c0c)
+
+/* enable the ID buf for read */
+#define WFPM_PS_CTL_CLR                        0xA0300C
+#define WFMP_MAC_ADDR_0                        0xA03080
+#define WFMP_MAC_ADDR_1                        0xA03084
+#define LMPM_PMG_EN                    0xA01CEC
+#define RADIO_REG_SYS_MANUAL_DFT_0     0xAD4078
+#define RFIC_REG_RD                    0xAD0470
+#define WFPM_CTRL_REG                  0xA03030
+enum {
+       ENABLE_WFPM = BIT(31),
+       WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK       = 0x80000000,
+};
+
+#define AUX_MISC_REG                   0xA200B0
+enum {
+       HW_STEP_LOCATION_BITS = 24,
+};
+
+#define AUX_MISC_MASTER1_EN            0xA20818
+enum aux_misc_master1_en {
+       AUX_MISC_MASTER1_EN_SBE_MSK     = 0x1,
+};
+
+#define AUX_MISC_MASTER1_SMPHR_STATUS  0xA20800
+#define RSA_ENABLE                     0xA24B08
+#define PREG_AUX_BUS_WPROT_0           0xA04CC0
 
 /* FW chicken bits */
 #define LMPM_CHICK                     0xA01FF8
index a96bd8db6ceb67e7932ebf4c9a8b75a1f43094de..11ac5c58527f6f92b5980b15f43defa9ad5822c9 100644 (file)
@@ -458,6 +458,8 @@ struct iwl_trans_txq_scd_cfg {
  * @txq_disable: de-configure a Tx queue to send AMPDUs
  *     Must be atomic
  * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
+ * @freeze_txq_timer: prevents the timer of the queue from firing until the
+ *     queue is set to awake. Must be atomic.
  * @dbgfs_register: add the dbgfs files under this directory. Files will be
  *     automatically deleted.
  * @write8: write a u8 to a register at offset ofs from the BAR
@@ -517,6 +519,8 @@ struct iwl_trans_ops {
 
        int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
        int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
+       void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
+                                bool freeze);
 
        void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
        void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -595,6 +599,7 @@ enum iwl_d0i3_mode {
  * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
  * @dbg_dest_tlv: points to the destination TLV for debug
  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
+ * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
  */
 struct iwl_trans {
@@ -628,7 +633,8 @@ struct iwl_trans {
        u64 dflt_pwr_limit;
 
        const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
-       const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
+       const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+       struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
        u8 dbg_dest_reg_num;
 
        enum iwl_d0i3_mode d0i3_mode;
@@ -871,6 +877,17 @@ void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
        iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
 }
 
+static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
+                                             unsigned long txqs,
+                                             bool freeze)
+{
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+               IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+
+       if (trans->ops->freeze_txq_timer)
+               trans->ops->freeze_txq_timer(trans, txqs, freeze);
+}
+
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
                                                u32 txqs)
 {
index 7810c41cf9a7300e0d3b9bc16bca22a44188b463..13a0a03158deb0d1d884b390dfb2e7e3751b5ee2 100644 (file)
 #include "mvm.h"
 #include "iwl-debug.h"
 
-const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
-       [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
-       [BT_KILL_MSK_NEVER] = 0xffffffff,
-       [BT_KILL_MSK_ALWAYS] = 0,
-};
-
-const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-       },
-       {
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-       },
-       {
-               BT_KILL_MSK_DEFAULT,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_DEFAULT,
-       },
-};
-
-const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_DEFAULT,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_DEFAULT,
-       },
-};
-
-static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
-       cpu_to_le32(0xf0f0f0f0), /* 50% */
-       cpu_to_le32(0xc0c0c0c0), /* 25% */
-       cpu_to_le32(0xfcfcfcfc), /* 75% */
-       cpu_to_le32(0xfefefefe), /* 87.5% */
-};
-
-static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-};
-
-static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-       {
-               /* Tight */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaeaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0x00004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               /* Loose */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               /* Tx Tx disabled */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xeeaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-};
-
 /* 20MHz / 40MHz below / 40Mhz above*/
 static const __le64 iwl_ci_mask[][3] = {
        /* dummy entry for channel 0 */
@@ -596,14 +444,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                goto send_cmd;
        }
 
-       bt_cmd->max_kill = cpu_to_le32(5);
-       bt_cmd->bt4_antenna_isolation_thr =
-               cpu_to_le32(IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS);
-       bt_cmd->bt4_tx_tx_delta_freq_thr = cpu_to_le32(15);
-       bt_cmd->bt4_tx_rx_max_freq0 = cpu_to_le32(15);
-       bt_cmd->override_primary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
-       bt_cmd->override_secondary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
-
        mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
        bt_cmd->mode = cpu_to_le32(mode);
 
@@ -611,7 +451,7 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                bt_cmd->enabled_modules |=
                        cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
 
-       if (IWL_MVM_BT_COEX_CORUNNING)
+       if (iwl_mvm_bt_is_plcr_supported(mvm))
                bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
 
        if (IWL_MVM_BT_COEX_MPLUT) {
@@ -622,18 +462,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 
        bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
 
-       if (mvm->cfg->bt_shared_single_ant)
-               memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
-                      sizeof(iwl_single_shared_ant));
-       else
-               memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
-                      sizeof(iwl_combined_lookup));
-
-       memcpy(&bt_cmd->mplut_prio_boost, iwl_bt_prio_boost,
-              sizeof(iwl_bt_prio_boost));
-       bt_cmd->multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
-       bt_cmd->multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
-
 send_cmd:
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
        memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@@ -644,48 +472,6 @@ send_cmd:
        return ret;
 }
 
-static int iwl_mvm_bt_udpate_sw_boost(struct iwl_mvm *mvm)
-{
-       struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
-       u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
-       u32 secondary_lut = le32_to_cpu(notif->secondary_ch_lut);
-       u32 ag = le32_to_cpu(notif->bt_activity_grading);
-       struct iwl_bt_coex_sw_boost_update_cmd cmd = {};
-       u8 ack_kill_msk[NUM_PHY_CTX] = {};
-       u8 cts_kill_msk[NUM_PHY_CTX] = {};
-       int i;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       ack_kill_msk[0] = iwl_bt_ack_kill_msk[ag][primary_lut];
-       cts_kill_msk[0] = iwl_bt_cts_kill_msk[ag][primary_lut];
-
-       ack_kill_msk[1] = iwl_bt_ack_kill_msk[ag][secondary_lut];
-       cts_kill_msk[1] = iwl_bt_cts_kill_msk[ag][secondary_lut];
-
-       /* Don't send HCMD if there is no update */
-       if (!memcmp(ack_kill_msk, mvm->bt_ack_kill_msk, sizeof(ack_kill_msk)) ||
-           !memcmp(cts_kill_msk, mvm->bt_cts_kill_msk, sizeof(cts_kill_msk)))
-               return 0;
-
-       memcpy(mvm->bt_ack_kill_msk, ack_kill_msk,
-              sizeof(mvm->bt_ack_kill_msk));
-       memcpy(mvm->bt_cts_kill_msk, cts_kill_msk,
-              sizeof(mvm->bt_cts_kill_msk));
-
-       BUILD_BUG_ON(ARRAY_SIZE(ack_kill_msk) < ARRAY_SIZE(cmd.boost_values));
-
-       for (i = 0; i < ARRAY_SIZE(cmd.boost_values); i++) {
-               cmd.boost_values[i].kill_ack_msk =
-                       cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk[i]]);
-               cmd.boost_values[i].kill_cts_msk =
-                       cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk[i]]);
-       }
-
-       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_SW_BOOST, 0,
-                                   sizeof(cmd), &cmd);
-}
-
 static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
                                       bool enable)
 {
@@ -951,9 +737,6 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
                        IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
                memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
        }
-
-       if (iwl_mvm_bt_udpate_sw_boost(mvm))
-               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
 int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
@@ -1024,7 +807,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
 }
 
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                          enum ieee80211_rssi_event rssi_event)
+                          enum ieee80211_rssi_event_data rssi_event)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_bt_iterator_data data = {
@@ -1074,9 +857,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_bt_rssi_iterator, &data);
-
-       if (iwl_mvm_bt_udpate_sw_boost(mvm))
-               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000)
@@ -1235,7 +1015,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
 
-       if (!IWL_MVM_BT_COEX_CORUNNING)
+       if (!iwl_mvm_bt_is_plcr_supported(mvm))
                return 0;
 
        lockdep_assert_held(&mvm->mutex);
index 542ee74f290aec1a3f356b3c69718b5029c4e708..d954591e0be58528d138f8738b2cb2325db1fed3 100644 (file)
@@ -288,6 +288,65 @@ static const __le64 iwl_ci_mask[][3] = {
        },
 };
 
+enum iwl_bt_kill_msk {
+       BT_KILL_MSK_DEFAULT,
+       BT_KILL_MSK_NEVER,
+       BT_KILL_MSK_ALWAYS,
+       BT_KILL_MSK_MAX,
+};
+
+static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
+       [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
+       [BT_KILL_MSK_NEVER] = 0xffffffff,
+       [BT_KILL_MSK_ALWAYS] = 0,
+};
+
+static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
+       {
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+       },
+       {
+               BT_KILL_MSK_NEVER,
+               BT_KILL_MSK_NEVER,
+               BT_KILL_MSK_NEVER,
+       },
+       {
+               BT_KILL_MSK_NEVER,
+               BT_KILL_MSK_NEVER,
+               BT_KILL_MSK_NEVER,
+       },
+       {
+               BT_KILL_MSK_DEFAULT,
+               BT_KILL_MSK_NEVER,
+               BT_KILL_MSK_DEFAULT,
+       },
+};
+
+static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
+       {
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+       },
+       {
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+       },
+       {
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_ALWAYS,
+       },
+       {
+               BT_KILL_MSK_DEFAULT,
+               BT_KILL_MSK_ALWAYS,
+               BT_KILL_MSK_DEFAULT,
+       },
+};
+
 struct corunning_block_luts {
        u8 range;
        __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
@@ -619,7 +678,7 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
        if (IWL_MVM_BT_COEX_SYNC2SCO)
                bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
-       if (IWL_MVM_BT_COEX_CORUNNING) {
+       if (iwl_mvm_bt_is_plcr_supported(mvm)) {
                bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
                                                     BT_VALID_CORUN_LUT_40);
                bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
@@ -633,7 +692,7 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
        if (IWL_MVM_BT_COEX_TTC)
                bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
 
-       if (IWL_MVM_BT_COEX_RRC)
+       if (iwl_mvm_bt_is_rrc_supported(mvm))
                bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
 
        if (mvm->cfg->bt_shared_single_ant)
@@ -1069,7 +1128,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
 }
 
 void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                              enum ieee80211_rssi_event rssi_event)
+                              enum ieee80211_rssi_event_data rssi_event)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_bt_iterator_data data = {
@@ -1168,16 +1227,10 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
        return lut_type != BT_COEX_LOOSE_LUT;
 }
 
-bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant)
-{
-       u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-       return ag < BT_HIGH_TRAFFIC;
-}
-
 bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
 {
        u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-       return ag == BT_OFF;
+       return ag < BT_HIGH_TRAFFIC;
 }
 
 bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
@@ -1214,7 +1267,7 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
                .dataflags = { IWL_HCMD_DFL_NOCOPY, },
        };
 
-       if (!IWL_MVM_BT_COEX_CORUNNING)
+       if (!iwl_mvm_bt_is_plcr_supported(mvm))
                return 0;
 
        lockdep_assert_held(&mvm->mutex);
index 14e8fd6618897adbdd7e18adb849b51032736839..5f8afa5f11a35f8da4761fb638d76f34f4f0a9eb 100644 (file)
@@ -694,6 +694,9 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        if (ret)
                IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
 
+       if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
+               IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
+
        return 0;
 }
 
@@ -1596,7 +1599,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        /* RF-kill already asserted again... */
        if (!cmd.resp_pkt) {
-               ret = -ERFKILL;
+               fw_status = ERR_PTR(-ERFKILL);
                goto out_free_resp;
        }
 
@@ -1605,7 +1608,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        len = iwl_rx_packet_payload_len(cmd.resp_pkt);
        if (len < status_size) {
                IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-               ret = -EIO;
+               fw_status = ERR_PTR(-EIO);
                goto out_free_resp;
        }
 
@@ -1613,7 +1616,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        if (len != (status_size +
                    ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
                IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-               ret = -EIO;
+               fw_status = ERR_PTR(-EIO);
                goto out_free_resp;
        }
 
@@ -1621,7 +1624,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 out_free_resp:
        iwl_free_resp(&cmd);
-       return ret ? ERR_PTR(ret) : fw_status;
+       return fw_status;
 }
 
 /* releases the MVM mutex */
@@ -1874,27 +1877,36 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        /* query SRAM first in case we want event logging */
        iwl_mvm_read_d3_sram(mvm);
 
+       /*
+        * Query the current location and source from the D3 firmware so we
+        * can play it back when we re-intiailize the D0 firmware
+        */
+       iwl_mvm_update_changed_regdom(mvm);
+
        if (mvm->net_detect) {
                iwl_mvm_query_netdetect_reasons(mvm, vif);
+               /* has unlocked the mutex, so skip that */
+               goto out;
        } else {
                keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
                if (keep)
                        mvm->keep_vif = vif;
 #endif
+               /* has unlocked the mutex, so skip that */
+               goto out_iterate;
        }
-       /* has unlocked the mutex, so skip that */
-       goto out;
 
  out_unlock:
        mutex_unlock(&mvm->mutex);
 
- out:
+out_iterate:
        if (!test)
                ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
                        IEEE80211_IFACE_ITER_NORMAL,
                        iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
 
+out:
        /* return 1 to reconfigure the device */
        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
index 5fe14591e1c41f06d0f9c4893290165b3ea7674e..5f37eab5008d7a9ebe1719204f024c9d70e01434 100644 (file)
@@ -545,6 +545,57 @@ static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
        return ret ? count : -EINVAL;
 }
 
+static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
+                                         size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       struct iwl_mvm_phy_ctxt *phy_ctxt;
+       u16 value;
+       int ret;
+
+       ret = kstrtou16(buf, 0, &value);
+       if (ret)
+               return ret;
+
+       mutex_lock(&mvm->mutex);
+       rcu_read_lock();
+
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+       /* make sure the channel context is assigned */
+       if (!chanctx_conf) {
+               rcu_read_unlock();
+               mutex_unlock(&mvm->mutex);
+               return -EINVAL;
+       }
+
+       phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
+       rcu_read_unlock();
+
+       mvm->dbgfs_rx_phyinfo = value;
+
+       ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
+                                      chanctx_conf->rx_chains_static,
+                                      chanctx_conf->rx_chains_dynamic);
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       char buf[8];
+
+       snprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+}
+
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
        _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -560,6 +611,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -575,7 +627,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return;
 
        mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
-       mvmvif->mvm = mvm;
 
        if (!mvmvif->dbgfs_dir) {
                IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
@@ -595,6 +646,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                 S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir,
                                 S_IRUSR | S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir,
+                                S_IRUSR | S_IWUSR);
 
        if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
            mvmvif == mvm->bf_allowed_vif)
index 82c09d86af8c055d5fdda608756d094905898a8f..8c5229892e573aa58234e4e64893ddee0c89253e 100644 (file)
@@ -562,11 +562,12 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
                               "\tSecondary Channel Bitmap 0x%016llx\n",
                               le64_to_cpu(cmd->bt_secondary_ci));
 
-               pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
-               pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]);
-               pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]);
+               pos += scnprintf(buf+pos, bufsz-pos,
+                                "BT Configuration CMD - 0=default, 1=never, 2=always\n");
+               pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
+                                mvm->bt_ack_kill_msk[0]);
+               pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
+                                mvm->bt_cts_kill_msk[0]);
 
        } else {
                struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
@@ -579,21 +580,6 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
                pos += scnprintf(buf+pos, bufsz-pos,
                               "\tSecondary Channel Bitmap 0x%016llx\n",
                               le64_to_cpu(cmd->bt_secondary_ci));
-
-               pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "\tPrimary: ACK Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]);
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "\tPrimary: CTS Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]);
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "\tSecondary: ACK Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[1]]);
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "\tSecondary: CTS Kill Mask 0x%08x\n",
-                                iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[1]]);
-
        }
 
        mutex_unlock(&mvm->mutex);
@@ -942,7 +928,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
                                          size_t count, loff_t *ppos)
 {
        struct iwl_mvm *mvm = file->private_data;
-       enum iwl_fw_dbg_conf conf;
+       int conf;
        char buf[8];
        const size_t bufsz = sizeof(buf);
        int pos = 0;
@@ -966,7 +952,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
        if (ret)
                return ret;
 
-       if (WARN_ON(conf_id >= FW_DBG_MAX))
+       if (WARN_ON(conf_id >= FW_DBG_CONF_MAX))
                return -EINVAL;
 
        mutex_lock(&mvm->mutex);
@@ -985,7 +971,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
        if (ret)
                return ret;
 
-       iwl_mvm_fw_dbg_collect(mvm);
+       iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
 
        iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
 
index f3b11897991eeee2e1d88329d075e52a25a106e3..d398a6102805e9c77d1ee921e80c4572b45d2111 100644 (file)
@@ -235,36 +235,12 @@ enum iwl_bt_coex_enabled_modules {
  * struct iwl_bt_coex_cmd - bt coex configuration command
  * @mode: enum %iwl_bt_coex_mode
  * @enabled_modules: enum %iwl_bt_coex_enabled_modules
- * @max_kill: max count of Tx retries due to kill from PTA
- * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
- *     should be set by default
- * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
- *     should be set by default
- * @bt4_antenna_isolation_thr: antenna threshold value
- * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
- * @bt4_tx_rx_max_freq0: TxRx max frequency
- * @multiprio_lut: multi priority LUT configuration
- * @mplut_prio_boost: BT priority boost registers
- * @decision_lut: PTA decision LUT, per Prio-Ch
  *
  * The structure is used for the BT_COEX command.
  */
 struct iwl_bt_coex_cmd {
        __le32 mode;
        __le32 enabled_modules;
-
-       __le32 max_kill;
-       __le32 override_primary_lut;
-       __le32 override_secondary_lut;
-       __le32 bt4_antenna_isolation_thr;
-
-       __le32 bt4_tx_tx_delta_freq_thr;
-       __le32 bt4_tx_rx_max_freq0;
-
-       __le32 multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
-       __le32 mplut_prio_boost[BT_COEX_BOOST_SIZE];
-
-       __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
 } __packed; /* BT_COEX_CMD_API_S_VER_6 */
 
 /**
@@ -279,29 +255,6 @@ struct iwl_bt_coex_corun_lut_update_cmd {
        __le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE];
 } __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */
 
-/**
- * struct iwl_bt_coex_sw_boost - SW boost values
- * @wifi_tx_prio_boost: SW boost of wifi tx priority
- * @wifi_rx_prio_boost: SW boost of wifi rx priority
- * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
- * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
- */
-struct iwl_bt_coex_sw_boost {
-       __le32 wifi_tx_prio_boost;
-       __le32 wifi_rx_prio_boost;
-       __le32 kill_ack_msk;
-       __le32 kill_cts_msk;
-};
-
-/**
- * struct iwl_bt_coex_sw_boost_update_cmd - command to update the SW boost
- * @boost_values: check struct  %iwl_bt_coex_sw_boost - one for each channel
- *     primary / secondary / low priority
- */
-struct iwl_bt_coex_sw_boost_update_cmd {
-       struct iwl_bt_coex_sw_boost boost_values[3];
-} __packed; /* BT_COEX_UPDATE_SW_BOOST_S_VER_1 */
-
 /**
  * struct iwl_bt_coex_reduced_txp_update_cmd
  * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the
index c405cda1025fa4b745b541d65b679bca1dd01253..aabaedd3b3ee1c6e3deeadc1ec7a9a9fe6059b83 100644 (file)
@@ -70,6 +70,7 @@
 #define MAC_INDEX_AUX          4
 #define MAC_INDEX_MIN_DRIVER   0
 #define NUM_MAC_INDEX_DRIVER   MAC_INDEX_AUX
+#define NUM_MAC_INDEX          (MAC_INDEX_AUX + 1)
 
 enum iwl_ac {
        AC_BK,
index cfc0e65b34a5e14494d83af1b560d0a33493aa66..a5fbbd637070795b97fafde922aabb50d7bcaf4c 100644 (file)
 
 /* Scan Commands, Responses, Notifications */
 
-/* Masks for iwl_scan_channel.type flags */
-#define SCAN_CHANNEL_TYPE_ACTIVE       BIT(0)
-#define SCAN_CHANNEL_NARROW_BAND       BIT(22)
-
 /* Max number of IEs for direct SSID scans in a command */
 #define PROBE_OPTION_MAX               20
 
-/**
- * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
- * @channel: band is selected by iwl_scan_cmd "flags" field
- * @tx_gain: gain for analog radio
- * @dsp_atten: gain for DSP
- * @active_dwell: dwell time for active scan in TU, typically 5-50
- * @passive_dwell: dwell time for passive scan in TU, typically 20-500
- * @type: type is broken down to these bits:
- *     bit 0: 0 = passive, 1 = active
- *     bits 1-20: SSID direct bit map. If any of these bits is set then
- *             the corresponding SSID IE is transmitted in probe request
- *             (bit i adds IE in position i to the probe request)
- *     bit 22: channel width, 0 = regular, 1 = TGj narrow channel
- *
- * @iteration_count:
- * @iteration_interval:
- * This struct is used once for each channel in the scan list.
- * Each channel can independently select:
- * 1)  SSID for directed active scans
- * 2)  Txpower setting (for rate specified within Tx command)
- * 3)  How long to stay on-channel (behavior may be modified by quiet_time,
- *     quiet_plcp_th, good_CRC_th)
- *
- * To avoid uCode errors, make sure the following are true (see comments
- * under struct iwl_scan_cmd about max_out_time and quiet_time):
- * 1)  If using passive_dwell (i.e. passive_dwell != 0):
- *     active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
- * 2)  quiet_time <= active_dwell
- * 3)  If restricting off-channel time (i.e. max_out_time !=0):
- *     passive_dwell < max_out_time
- *     active_dwell < max_out_time
- */
-struct iwl_scan_channel {
-       __le32 type;
-       __le16 channel;
-       __le16 iteration_count;
-       __le32 iteration_interval;
-       __le16 active_dwell;
-       __le16 passive_dwell;
-} __packed; /* SCAN_CHANNEL_CONTROL_API_S_VER_1 */
-
 /**
  * struct iwl_ssid_ie - directed scan network information element
  *
@@ -132,152 +87,6 @@ struct iwl_ssid_ie {
        u8 ssid[IEEE80211_MAX_SSID_LEN];
 } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
 
-/**
- * iwl_scan_flags - masks for scan command flags
- *@SCAN_FLAGS_PERIODIC_SCAN:
- *@SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX:
- *@SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
- *@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
- *@SCAN_FLAGS_FRAGMENTED_SCAN:
- *@SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that was active
- *     in the past hour, even if they are marked as passive.
- */
-enum iwl_scan_flags {
-       SCAN_FLAGS_PERIODIC_SCAN                = BIT(0),
-       SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX   = BIT(1),
-       SCAN_FLAGS_DELAYED_SCAN_LOWBAND         = BIT(2),
-       SCAN_FLAGS_DELAYED_SCAN_HIGHBAND        = BIT(3),
-       SCAN_FLAGS_FRAGMENTED_SCAN              = BIT(4),
-       SCAN_FLAGS_PASSIVE2ACTIVE               = BIT(5),
-};
-
-/**
- * enum iwl_scan_type - Scan types for scan command
- * @SCAN_TYPE_FORCED:
- * @SCAN_TYPE_BACKGROUND:
- * @SCAN_TYPE_OS:
- * @SCAN_TYPE_ROAMING:
- * @SCAN_TYPE_ACTION:
- * @SCAN_TYPE_DISCOVERY:
- * @SCAN_TYPE_DISCOVERY_FORCED:
- */
-enum iwl_scan_type {
-       SCAN_TYPE_FORCED                = 0,
-       SCAN_TYPE_BACKGROUND            = 1,
-       SCAN_TYPE_OS                    = 2,
-       SCAN_TYPE_ROAMING               = 3,
-       SCAN_TYPE_ACTION                = 4,
-       SCAN_TYPE_DISCOVERY             = 5,
-       SCAN_TYPE_DISCOVERY_FORCED      = 6,
-}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
-
-/**
- * struct iwl_scan_cmd - scan request command
- * ( SCAN_REQUEST_CMD = 0x80 )
- * @len: command length in bytes
- * @scan_flags: scan flags from SCAN_FLAGS_*
- * @channel_count: num of channels in channel list
- *     (1 - ucode_capa.n_scan_channels)
- * @quiet_time: in msecs, dwell this time for active scan on quiet channels
- * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
- *     this number of packets were received (typically 1)
- * @passive2active: is auto switching from passive to active during scan allowed
- * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in TUs, max out of serving channel time
- * @suspend_time: how long to pause scan when returning to service channel:
- *     bits 0-19: beacon interal in TUs (suspend before executing)
- *     bits 20-23: reserved
- *     bits 24-31: number of beacons (suspend between channels)
- * @rxon_flags: RXON_FLG_*
- * @filter_flags: RXON_FILTER_*
- * @tx_cmd: for active scans (zero for passive), w/o payload,
- *     no RS so specify TX rate
- * @direct_scan: direct scan SSIDs
- * @type: one of SCAN_TYPE_*
- * @repeats: how many time to repeat the scan
- */
-struct iwl_scan_cmd {
-       __le16 len;
-       u8 scan_flags;
-       u8 channel_count;
-       __le16 quiet_time;
-       __le16 quiet_plcp_th;
-       __le16 passive2active;
-       __le16 rxchain_sel_flags;
-       __le32 max_out_time;
-       __le32 suspend_time;
-       /* RX_ON_FLAGS_API_S_VER_1 */
-       __le32 rxon_flags;
-       __le32 filter_flags;
-       struct iwl_tx_cmd tx_cmd;
-       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
-       __le32 type;
-       __le32 repeats;
-
-       /*
-        * Probe request frame, followed by channel list.
-        *
-        * Size of probe request frame is specified by byte count in tx_cmd.
-        * Channel list follows immediately after probe request frame.
-        * Number of channels in list is specified by channel_count.
-        * Each channel in list is of type:
-        *
-        * struct iwl_scan_channel channels[0];
-        *
-        * NOTE:  Only one band of channels can be scanned per pass.  You
-        * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
-        * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
-        * before requesting another scan.
-        */
-       u8 data[0];
-} __packed; /* SCAN_REQUEST_FIXED_PART_API_S_VER_5 */
-
-/* Response to scan request contains only status with one of these values */
-#define SCAN_RESPONSE_OK       0x1
-#define SCAN_RESPONSE_ERROR    0x2
-
-/*
- * SCAN_ABORT_CMD = 0x81
- * When scan abort is requested, the command has no fields except the common
- * header. The response contains only a status with one of these values.
- */
-#define SCAN_ABORT_POSSIBLE    0x1
-#define SCAN_ABORT_IGNORED     0x2 /* no pending scans */
-
-/* TODO: complete documentation */
-#define  SCAN_OWNER_STATUS 0x1
-#define  MEASURE_OWNER_STATUS 0x2
-
-/**
- * struct iwl_scan_start_notif - notifies start of scan in the device
- * ( SCAN_START_NOTIFICATION = 0x82 )
- * @tsf_low: TSF timer (lower half) in usecs
- * @tsf_high: TSF timer (higher half) in usecs
- * @beacon_timer: structured as follows:
- *     bits 0:19 - beacon interval in usecs
- *     bits 20:23 - reserved (0)
- *     bits 24:31 - number of beacons
- * @channel: which channel is scanned
- * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
- * @status: one of *_OWNER_STATUS
- */
-struct iwl_scan_start_notif {
-       __le32 tsf_low;
-       __le32 tsf_high;
-       __le32 beacon_timer;
-       u8 channel;
-       u8 band;
-       u8 reserved[2];
-       __le32 status;
-} __packed; /* SCAN_START_NTF_API_S_VER_1 */
-
-/* scan results probe_status first bit indicates success */
-#define SCAN_PROBE_STATUS_OK           0
-#define SCAN_PROBE_STATUS_TX_FAILED    BIT(0)
-/* error statuses combined with TX_FAILED */
-#define SCAN_PROBE_STATUS_FAIL_TTL     BIT(1)
-#define SCAN_PROBE_STATUS_FAIL_BT      BIT(2)
-
 /* How many statistics are gathered for each channel */
 #define SCAN_RESULTS_STATISTICS 1
 
index 928168b183467177a5b7b32b17133eb75f5d89c4..709e28d8b1b09634aa1e427a2544ae0ea669df59 100644 (file)
@@ -65,6 +65,7 @@
 
 #ifndef __fw_api_stats_h__
 #define __fw_api_stats_h__
+#include "fw-api-mac.h"
 
 struct mvm_statistics_dbg {
        __le32 burst_check;
@@ -218,7 +219,7 @@ struct mvm_statistics_bt_activity {
        __le32 lo_priority_rx_denied_cnt;
 } __packed;  /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
 
-struct mvm_statistics_general {
+struct mvm_statistics_general_v5 {
        __le32 radio_temperature;
        __le32 radio_voltage;
        struct mvm_statistics_dbg dbg;
@@ -244,6 +245,39 @@ struct mvm_statistics_general {
        struct mvm_statistics_bt_activity bt_activity;
 } __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
 
+struct mvm_statistics_general_v8 {
+       __le32 radio_temperature;
+       __le32 radio_voltage;
+       struct mvm_statistics_dbg dbg;
+       __le32 sleep_time;
+       __le32 slots_out;
+       __le32 slots_idle;
+       __le32 ttl_timestamp;
+       struct mvm_statistics_div slow_div;
+       __le32 rx_enable_counter;
+       /*
+        * num_of_sos_states:
+        *  count the number of times we have to re-tune
+        *  in order to get out of bad PHY status
+        */
+       __le32 num_of_sos_states;
+       __le32 beacon_filtered;
+       __le32 missed_beacons;
+       __s8 beacon_filter_average_energy;
+       __s8 beacon_filter_reason;
+       __s8 beacon_filter_current_energy;
+       __s8 beacon_filter_reserved;
+       __le32 beacon_filter_delta_time;
+       struct mvm_statistics_bt_activity bt_activity;
+       __le64 rx_time;
+       __le64 on_time_rf;
+       __le64 on_time_scan;
+       __le64 tx_time;
+       __le32 beacon_counter[NUM_MAC_INDEX];
+       u8 beacon_average_energy[NUM_MAC_INDEX];
+       u8 reserved[4 - (NUM_MAC_INDEX % 4)];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
+
 struct mvm_statistics_rx {
        struct mvm_statistics_rx_phy ofdm;
        struct mvm_statistics_rx_phy cck;
@@ -256,22 +290,28 @@ struct mvm_statistics_rx {
  *
  * By default, uCode issues this notification after receiving a beacon
  * while associated.  To disable this behavior, set DISABLE_NOTIF flag in the
- * REPLY_STATISTICS_CMD 0x9c, above.
- *
- * Statistics counters continue to increment beacon after beacon, but are
- * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
- * 0x9c with CLEAR_STATS bit set (see above).
- *
- * uCode also issues this notification during scans.  uCode clears statistics
- * appropriately so that each notification contains statistics for only the
- * one channel that has just been scanned.
+ * STATISTICS_CMD (0x9c), below.
  */
 
-struct iwl_notif_statistics {
+struct iwl_notif_statistics_v8 {
        __le32 flag;
        struct mvm_statistics_rx rx;
        struct mvm_statistics_tx tx;
-       struct mvm_statistics_general general;
+       struct mvm_statistics_general_v5 general;
 } __packed; /* STATISTICS_NTFY_API_S_VER_8 */
 
+struct iwl_notif_statistics_v10 {
+       __le32 flag;
+       struct mvm_statistics_rx rx;
+       struct mvm_statistics_tx tx;
+       struct mvm_statistics_general_v8 general;
+} __packed; /* STATISTICS_NTFY_API_S_VER_10 */
+
+#define IWL_STATISTICS_FLG_CLEAR               0x1
+#define IWL_STATISTICS_FLG_DISABLE_NOTIF       0x2
+
+struct iwl_statistics_cmd {
+       __le32 flags;
+} __packed; /* STATISTICS_CMD_API_S_VER_1 */
+
 #endif /* __fw_api_stats_h__ */
index b56154fe8ec59cbb1db6b65aea1867e16f8504f9..aab68cbae754d547a9e1fe514c4c88de6877777f 100644 (file)
@@ -192,6 +192,7 @@ enum {
        BEACON_NOTIFICATION = 0x90,
        BEACON_TEMPLATE_CMD = 0x91,
        TX_ANT_CONFIGURATION_CMD = 0x98,
+       STATISTICS_CMD = 0x9c,
        STATISTICS_NOTIFICATION = 0x9d,
        EOSP_NOTIFICATION = 0x9e,
        REDUCE_TX_POWER_CMD = 0x9f,
@@ -211,6 +212,10 @@ enum {
        REPLY_RX_MPDU_CMD = 0xc1,
        BA_NOTIF = 0xc5,
 
+       /* Location Aware Regulatory */
+       MCC_UPDATE_CMD = 0xc8,
+       MCC_CHUB_UPDATE_CMD = 0xc9,
+
        MARKER_CMD = 0xcb,
 
        /* BT Coex */
@@ -361,7 +366,8 @@ enum {
        NVM_SECTION_TYPE_CALIBRATION = 4,
        NVM_SECTION_TYPE_PRODUCTION = 5,
        NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
-       NVM_MAX_NUM_SECTIONS = 12,
+       NVM_SECTION_TYPE_PHY_SKU = 12,
+       NVM_MAX_NUM_SECTIONS = 13,
 };
 
 /**
@@ -431,7 +437,7 @@ enum {
 
 #define IWL_ALIVE_FLG_RFKILL   BIT(0)
 
-struct mvm_alive_resp {
+struct mvm_alive_resp_ver1 {
        __le16 status;
        __le16 flags;
        u8 ucode_minor;
@@ -482,6 +488,30 @@ struct mvm_alive_resp_ver2 {
        __le32 dbg_print_buff_addr;
 } __packed; /* ALIVE_RES_API_S_VER_2 */
 
+struct mvm_alive_resp {
+       __le16 status;
+       __le16 flags;
+       __le32 ucode_minor;
+       __le32 ucode_major;
+       u8 ver_subtype;
+       u8 ver_type;
+       u8 mac;
+       u8 opt;
+       __le32 timestamp;
+       __le32 error_event_table_ptr;   /* SRAM address for error log */
+       __le32 log_event_table_ptr;     /* SRAM address for LMAC event log */
+       __le32 cpu_register_ptr;
+       __le32 dbgm_config_ptr;
+       __le32 alive_counter_ptr;
+       __le32 scd_base_ptr;            /* SRAM address for SCD */
+       __le32 st_fwrd_addr;            /* pointer to Store and forward */
+       __le32 st_fwrd_size;
+       __le32 umac_minor;              /* UMAC version: minor */
+       __le32 umac_major;              /* UMAC version: major */
+       __le32 error_info_addr;         /* SRAM address for UMAC error log */
+       __le32 dbg_print_buff_addr;
+} __packed; /* ALIVE_RES_API_S_VER_3 */
+
 /* Error response/notification */
 enum {
        FW_ERR_UNKNOWN_CMD = 0x0,
@@ -1417,7 +1447,19 @@ enum iwl_sf_scenario {
 #define SF_W_MARK_LEGACY 4096
 #define SF_W_MARK_SCAN 4096
 
-/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
+/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
+#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160   /* 150 uSec  */
+#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400  /* 0.4 mSec */
+#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160              /* 150 uSec */
+#define SF_AGG_UNICAST_AGING_TIMER_DEF 400             /* 0.4 mSec */
+#define SF_MCAST_IDLE_TIMER_DEF 160            /* 150 mSec */
+#define SF_MCAST_AGING_TIMER_DEF 400           /* 0.4 mSec */
+#define SF_BA_IDLE_TIMER_DEF 160                       /* 150 uSec */
+#define SF_BA_AGING_TIMER_DEF 400                      /* 0.4 mSec */
+#define SF_TX_RE_IDLE_TIMER_DEF 160                    /* 150 uSec */
+#define SF_TX_RE_AGING_TIMER_DEF 400           /* 0.4 mSec */
+
+/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */
 #define SF_SINGLE_UNICAST_IDLE_TIMER 320       /* 300 uSec  */
 #define SF_SINGLE_UNICAST_AGING_TIMER 2016     /* 2 mSec */
 #define SF_AGG_UNICAST_IDLE_TIMER 320          /* 300 uSec */
@@ -1448,6 +1490,92 @@ struct iwl_sf_cfg_cmd {
        __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
 } __packed; /* SF_CFG_API_S_VER_2 */
 
+/***********************************
+ * Location Aware Regulatory (LAR) API - MCC updates
+ ***********************************/
+
+/**
+ * struct iwl_mcc_update_cmd - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from where we got the MCC, see iwl_mcc_source
+ * @reserved: reserved for alignment
+ */
+struct iwl_mcc_update_cmd {
+       __le16 mcc;
+       u8 source_id;
+       u8 reserved;
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S */
+
+/**
+ * iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwl_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which matches the MCC
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
+ *             channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ *     16bits are used.
+ */
+struct iwl_mcc_update_resp {
+       __le32 status;
+       __le16 mcc;
+       u8 cap;
+       u8 source_id;
+       __le32 n_channels;
+       __le32 channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S */
+
+/**
+ * struct iwl_mcc_chub_notif - chub notifies of mcc change
+ * (MCC_CHUB_UPDATE_CMD = 0xc9)
+ * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
+ * the cellular and connectivity cores that gets updates of the mcc, and
+ * notifies the ucode directly of any mcc change.
+ * The ucode requests the driver to request the device to update geographic
+ * regulatory  profile according to the given MCC (Mobile Country Code).
+ * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: identity of the change originator, see iwl_mcc_source
+ * @reserved1: reserved for alignment
+ */
+struct iwl_mcc_chub_notif {
+       u16 mcc;
+       u8 source_id;
+       u8 reserved1;
+} __packed; /* LAR_MCC_NOTIFY_S */
+
+enum iwl_mcc_update_status {
+       MCC_RESP_NEW_CHAN_PROFILE,
+       MCC_RESP_SAME_CHAN_PROFILE,
+       MCC_RESP_INVALID,
+       MCC_RESP_NVM_DISABLED,
+       MCC_RESP_ILLEGAL,
+       MCC_RESP_LOW_PRIORITY,
+};
+
+enum iwl_mcc_source {
+       MCC_SOURCE_OLD_FW = 0,
+       MCC_SOURCE_ME = 1,
+       MCC_SOURCE_BIOS = 2,
+       MCC_SOURCE_3G_LTE_HOST = 3,
+       MCC_SOURCE_3G_LTE_DEVICE = 4,
+       MCC_SOURCE_WIFI = 5,
+       MCC_SOURCE_RESERVED = 6,
+       MCC_SOURCE_DEFAULT = 7,
+       MCC_SOURCE_UNINITIALIZED = 8,
+       MCC_SOURCE_GET_CURRENT = 0x10
+};
+
 /* DTS measurements */
 
 enum iwl_dts_measurement_flags {
index ca38e9817374cc6f4a462048c2d59a5244e33b1c..6cf7d9837ca54e938500302cd013a8a4a396be5d 100644 (file)
@@ -112,25 +112,27 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_alive_data *alive_data = data;
-       struct mvm_alive_resp *palive;
+       struct mvm_alive_resp_ver1 *palive1;
        struct mvm_alive_resp_ver2 *palive2;
+       struct mvm_alive_resp *palive;
 
-       if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
-               palive = (void *)pkt->data;
+       if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
+               palive1 = (void *)pkt->data;
 
                mvm->support_umac_log = false;
                mvm->error_event_table =
-                       le32_to_cpu(palive->error_event_table_ptr);
-               mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
-               alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+                       le32_to_cpu(palive1->error_event_table_ptr);
+               mvm->log_event_table =
+                       le32_to_cpu(palive1->log_event_table_ptr);
+               alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);
 
-               alive_data->valid = le16_to_cpu(palive->status) ==
+               alive_data->valid = le16_to_cpu(palive1->status) ==
                                    IWL_ALIVE_STATUS_OK;
                IWL_DEBUG_FW(mvm,
                             "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
-                            le16_to_cpu(palive->status), palive->ver_type,
-                            palive->ver_subtype, palive->flags);
-       } else {
+                            le16_to_cpu(palive1->status), palive1->ver_type,
+                            palive1->ver_subtype, palive1->flags);
+       } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
                palive2 = (void *)pkt->data;
 
                mvm->error_event_table =
@@ -156,6 +158,33 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                IWL_DEBUG_FW(mvm,
                             "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                             palive2->umac_major, palive2->umac_minor);
+       } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+               palive = (void *)pkt->data;
+
+               mvm->error_event_table =
+                       le32_to_cpu(palive->error_event_table_ptr);
+               mvm->log_event_table =
+                       le32_to_cpu(palive->log_event_table_ptr);
+               alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+               mvm->umac_error_event_table =
+                       le32_to_cpu(palive->error_info_addr);
+               mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
+               mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);
+
+               alive_data->valid = le16_to_cpu(palive->status) ==
+                                   IWL_ALIVE_STATUS_OK;
+               if (mvm->umac_error_event_table)
+                       mvm->support_umac_log = true;
+
+               IWL_DEBUG_FW(mvm,
+                            "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+                            le16_to_cpu(palive->status), palive->ver_type,
+                            palive->ver_subtype, palive->flags);
+
+               IWL_DEBUG_FW(mvm,
+                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+                            le32_to_cpu(palive->umac_major),
+                            le32_to_cpu(palive->umac_minor));
        }
 
        return true;
@@ -188,8 +217,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        struct iwl_sf_region st_fwrd_space;
 
        if (ucode_type == IWL_UCODE_REGULAR &&
-           iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_CUSTOM) &&
-           iwl_fw_dbg_conf_enabled(mvm->fw, FW_DBG_CUSTOM))
+           iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
                fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
        else
                fw = iwl_get_ucode_image(mvm, ucode_type);
@@ -451,20 +479,80 @@ exit:
        iwl_free_resp(&cmd);
 }
 
-void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm)
+int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
+                               struct iwl_mvm_dump_desc *desc,
+                               unsigned int delay)
 {
+       if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
+               return -EBUSY;
+
+       if (WARN_ON(mvm->fw_dump_desc))
+               iwl_mvm_free_fw_dump_desc(mvm);
+
+       IWL_WARN(mvm, "Collecting data: trigger %d fired.\n",
+                le32_to_cpu(desc->trig_desc.type));
+
+       mvm->fw_dump_desc = desc;
+
        /* stop recording */
        if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
                iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
        } else {
                iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
-               iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+               /* wait before we collect the data till the DBGC stop */
+               udelay(100);
        }
 
-       schedule_work(&mvm->fw_error_dump_wk);
+       queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
+
+       return 0;
+}
+
+int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
+                          const char *str, size_t len, unsigned int delay)
+{
+       struct iwl_mvm_dump_desc *desc;
+
+       desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+       if (!desc)
+               return -ENOMEM;
+
+       desc->len = len;
+       desc->trig_desc.type = cpu_to_le32(trig);
+       memcpy(desc->trig_desc.data, str, len);
+
+       return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
+}
+
+int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
+                               struct iwl_fw_dbg_trigger_tlv *trigger,
+                               const char *str, size_t len)
+{
+       unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+       u16 occurrences = le16_to_cpu(trigger->occurrences);
+       int ret;
+
+       if (!occurrences)
+               return 0;
+
+       ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), str,
+                                    len, delay);
+       if (ret)
+               return ret;
+
+       trigger->occurrences = cpu_to_le16(occurrences - 1);
+       return 0;
+}
+
+static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
+{
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+               iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+       else
+               iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
 }
 
-int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
+int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 {
        u8 *ptr;
        int ret;
@@ -474,6 +562,14 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
                      "Invalid configuration %d\n", conf_id))
                return -EINVAL;
 
+       /* EARLY START - firmware's configuration is hard coded */
+       if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
+            !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
+           conf_id == FW_DBG_START_FROM_ALIVE) {
+               iwl_mvm_restart_early_start(mvm);
+               return 0;
+       }
+
        if (!mvm->fw->dbg_conf_tlv[conf_id])
                return -EINVAL;
 
@@ -583,7 +679,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
 
        mvm->fw_dbg_conf = FW_DBG_INVALID;
-       iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM);
+       /* if we have a destination, assume EARLY START */
+       if (mvm->fw->dbg_dest_tlv)
+               mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
+       iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);
 
        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
@@ -640,6 +739,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                goto error;
 
+       /*
+        * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
+        * anyway, so don't init MCC.
+        */
+       if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
+               ret = iwl_mvm_init_mcc(mvm);
+               if (ret)
+                       goto error;
+       }
+
        if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
                ret = iwl_mvm_config_scan(mvm);
                if (ret)
index 7bdc6220743f405c3eb1e8de88bfa6c725bf0891..581b3b8f29f9b6d7460b98eeb9ee54e3b9612c35 100644 (file)
@@ -244,6 +244,7 @@ static void iwl_mvm_mac_sta_hw_queues_iter(void *_data,
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *exclude_vif)
 {
+       u8 sta_id;
        struct iwl_mvm_hw_queues_iface_iterator_data data = {
                .exclude_vif = exclude_vif,
                .used_hw_queues =
@@ -264,6 +265,13 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
                                          iwl_mvm_mac_sta_hw_queues_iter,
                                          &data);
 
+       /*
+        * Some TDLS stations may be removed but are in the process of being
+        * drained. Don't touch their queues.
+        */
+       for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT)
+               data.used_hw_queues |= mvm->tfd_drained[sta_id];
+
        return data.used_hw_queues;
 }
 
@@ -1367,10 +1375,18 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
 {
        struct iwl_missed_beacons_notif *missed_beacons = _data;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
+       struct iwl_fw_dbg_trigger_tlv *trigger;
+       u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
+       u32 rx_missed_bcon, rx_missed_bcon_since_rx;
 
        if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
                return;
 
+       rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
+       rx_missed_bcon_since_rx =
+               le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
        /*
         * TODO: the threshold should be adjusted based on latency conditions,
         * and/or in case of a CS flow on one of the other AP vifs.
@@ -1378,6 +1394,26 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
        if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
             IWL_MVM_MISSED_BEACONS_THRESHOLD)
                ieee80211_beacon_loss(vif);
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
+                                       FW_DBG_TRIGGER_MISSED_BEACONS))
+               return;
+
+       trigger = iwl_fw_dbg_get_trigger(mvm->fw,
+                                        FW_DBG_TRIGGER_MISSED_BEACONS);
+       bcon_trig = (void *)trigger->data;
+       stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
+       stop_trig_missed_bcon_since_rx =
+               le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);
+
+       /* TODO: implement start trigger */
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
+               return;
+
+       if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
+           rx_missed_bcon >= stop_trig_missed_bcon)
+               iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL, 0);
 }
 
 int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
index 09654e73a533f7733c2a5b1e47bf6251d6267585..302c8cc50f25a695c40ce59555f8a05bf58cae2d 100644 (file)
@@ -86,6 +86,7 @@
 #include "iwl-fw-error-dump.h"
 #include "iwl-prph.h"
 #include "iwl-csr.h"
+#include "iwl-nvm-parse.h"
 
 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
        {
@@ -301,6 +302,109 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
        }
 }
 
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+                                                 const char *alpha2,
+                                                 enum iwl_mcc_source src_id,
+                                                 bool *changed)
+{
+       struct ieee80211_regdomain *regd = NULL;
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mcc_update_resp *resp;
+
+       IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
+
+       lockdep_assert_held(&mvm->mutex);
+
+       resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
+       if (IS_ERR_OR_NULL(resp)) {
+               IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
+                             PTR_RET(resp));
+               goto out;
+       }
+
+       if (changed)
+               *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
+
+       regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
+                                     __le32_to_cpu(resp->n_channels),
+                                     resp->channels,
+                                     __le16_to_cpu(resp->mcc));
+       /* Store the return source id */
+       src_id = resp->source_id;
+       kfree(resp);
+       if (IS_ERR_OR_NULL(regd)) {
+               IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
+                             PTR_RET(regd));
+               goto out;
+       }
+
+       IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
+                     regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
+       mvm->lar_regdom_set = true;
+       mvm->mcc_src = src_id;
+
+out:
+       return regd;
+}
+
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
+{
+       bool changed;
+       struct ieee80211_regdomain *regd;
+
+       if (!iwl_mvm_is_lar_supported(mvm))
+               return;
+
+       regd = iwl_mvm_get_current_regdomain(mvm, &changed);
+       if (!IS_ERR_OR_NULL(regd)) {
+               /* only update the regulatory core if changed */
+               if (changed)
+                       regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+
+               kfree(regd);
+       }
+}
+
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+                                                         bool *changed)
+{
+       return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
+                                    iwl_mvm_is_wifi_mcc_supported(mvm) ?
+                                    MCC_SOURCE_GET_CURRENT :
+                                    MCC_SOURCE_OLD_FW, changed);
+}
+
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
+{
+       enum iwl_mcc_source used_src;
+       struct ieee80211_regdomain *regd;
+       const struct ieee80211_regdomain *r =
+                       rtnl_dereference(mvm->hw->wiphy->regd);
+
+       if (!r)
+               return 0;
+
+       /* save the last source in case we overwrite it below */
+       used_src = mvm->mcc_src;
+       if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
+               /* Notify the firmware we support wifi location updates */
+               regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+               if (!IS_ERR_OR_NULL(regd))
+                       kfree(regd);
+       }
+
+       /* Now set our last stored MCC and source */
+       regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, NULL);
+       if (IS_ERR_OR_NULL(regd))
+               return -EIO;
+
+       regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+       kfree(regd);
+
+       return 0;
+}
+
 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 {
        struct ieee80211_hw *hw = mvm->hw;
@@ -339,13 +443,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
            !iwlwifi_mod_params.sw_crypto)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
-           mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
-               hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
-               hw->wiphy->features |=
-                       NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
-                       NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
-       }
+       hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
+       hw->wiphy->features |=
+               NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+               NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
 
        hw->sta_data_size = sizeof(struct iwl_mvm_sta);
        hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -359,8 +460,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                BIT(NL80211_IFTYPE_ADHOC);
 
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
-       hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
-                                      REGULATORY_DISABLE_BEACON_HINTS;
+       hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
+       if (iwl_mvm_is_lar_supported(mvm))
+               hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+       else
+               hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+                                              REGULATORY_DISABLE_BEACON_HINTS;
 
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
                hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
@@ -892,12 +997,23 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
        iwl_trans_release_nic_access(mvm->trans, &flags);
 }
 
+void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
+{
+       if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
+           !mvm->fw_dump_desc)
+               return;
+
+       kfree(mvm->fw_dump_desc);
+       mvm->fw_dump_desc = NULL;
+}
+
 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 {
        struct iwl_fw_error_dump_file *dump_file;
        struct iwl_fw_error_dump_data *dump_data;
        struct iwl_fw_error_dump_info *dump_info;
        struct iwl_fw_error_dump_mem *dump_mem;
+       struct iwl_fw_error_dump_trigger_desc *dump_trig;
        struct iwl_mvm_dump_ptrs *fw_error_dump;
        u32 sram_len, sram_ofs;
        u32 file_len, fifo_data_len = 0;
@@ -967,6 +1083,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                   fifo_data_len +
                   sizeof(*dump_info);
 
+       if (mvm->fw_dump_desc)
+               file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
+                           mvm->fw_dump_desc->len;
+
        /* Make room for the SMEM, if it exists */
        if (smem_len)
                file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
@@ -978,6 +1098,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        dump_file = vzalloc(file_len);
        if (!dump_file) {
                kfree(fw_error_dump);
+               iwl_mvm_free_fw_dump_desc(mvm);
                return;
        }
 
@@ -1006,6 +1127,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
                iwl_mvm_dump_fifos(mvm, &dump_data);
 
+       if (mvm->fw_dump_desc) {
+               dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
+               dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
+                                            mvm->fw_dump_desc->len);
+               dump_trig = (void *)dump_data->data;
+               memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
+                      sizeof(*dump_trig) + mvm->fw_dump_desc->len);
+
+               /* now we can free this copy */
+               iwl_mvm_free_fw_dump_desc(mvm);
+               dump_data = iwl_fw_error_next_data(dump_data);
+       }
+
        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
        dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
        dump_mem = (void *)dump_data->data;
@@ -1044,16 +1178,26 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 
        dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
                      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
+
+       clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
 }
 
+struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
+       .trig_desc = {
+               .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
+       },
+};
+
 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 {
        /* clear the D3 reconfig, we only need it to avoid dumping a
         * firmware coredump on reconfiguration, we shouldn't do that
         * on D3->D0 transition
         */
-       if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status))
+       if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
+               mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
                iwl_mvm_fw_error_dump(mvm);
+       }
 
        /* cleanup all stale references (scan, roc), but keep the
         * ucode_down ref until reconfig is complete
@@ -1094,6 +1238,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 
        mvm->vif_count = 0;
        mvm->rx_ba_sessions = 0;
+       mvm->fw_dbg_conf = FW_DBG_INVALID;
+
+       /* keep statistics ticking */
+       iwl_mvm_accu_radio_stats(mvm);
 }
 
 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
@@ -1153,7 +1301,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
 
        clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        iwl_mvm_d0i3_enable_tx(mvm, NULL);
-       ret = iwl_mvm_update_quotas(mvm, NULL);
+       ret = iwl_mvm_update_quotas(mvm, false, NULL);
        if (ret)
                IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
                        ret);
@@ -1216,6 +1364,11 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 {
        lockdep_assert_held(&mvm->mutex);
 
+       /* firmware counters are obviously reset now, but we shouldn't
+        * partially track so also clear the fw_reset_accu counters.
+        */
+       memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
+
        /*
         * Disallow low power states when the FW is down by taking
         * the UCODE_DOWN ref. in case of ongoing hw restart the
@@ -1255,7 +1408,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
 
        flush_work(&mvm->d0i3_exit_work);
        flush_work(&mvm->async_handlers_wk);
-       flush_work(&mvm->fw_error_dump_wk);
+       cancel_delayed_work_sync(&mvm->fw_dump_wk);
+       iwl_mvm_free_fw_dump_desc(mvm);
 
        mutex_lock(&mvm->mutex);
        __iwl_mvm_mac_stop(mvm);
@@ -1303,6 +1457,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
 
+       mvmvif->mvm = mvm;
+
        /*
         * make sure D0i3 exit is completed, otherwise a target access
         * during tx queue configuration could be done when still in
@@ -1320,6 +1476,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       /* make sure that beacon statistics don't go backwards with FW reset */
+       if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               mvmvif->beacon_stats.accu_num_beacons +=
+                       mvmvif->beacon_stats.num_beacons;
+
        /* Allocate resources for the MAC context, and add it to the fw  */
        ret = iwl_mvm_mac_ctxt_init(mvm, vif);
        if (ret)
@@ -1813,8 +1974,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 
        if (changes & BSS_CHANGED_ASSOC) {
                if (bss_conf->assoc) {
+                       /* clear statistics to get clean beacon counter */
+                       iwl_mvm_request_statistics(mvm, true);
+                       memset(&mvmvif->beacon_stats, 0,
+                              sizeof(mvmvif->beacon_stats));
+
                        /* add quota for this interface */
-                       ret = iwl_mvm_update_quotas(mvm, NULL);
+                       ret = iwl_mvm_update_quotas(mvm, true, NULL);
                        if (ret) {
                                IWL_ERR(mvm, "failed to update quotas\n");
                                return;
@@ -1866,7 +2032,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                                mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
                        mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
                        /* remove quota for this interface */
-                       ret = iwl_mvm_update_quotas(mvm, NULL);
+                       ret = iwl_mvm_update_quotas(mvm, false, NULL);
                        if (ret)
                                IWL_ERR(mvm, "failed to update quotas\n");
 
@@ -1985,7 +2151,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
        /* power updated needs to be done before quotas */
        iwl_mvm_power_update_mac(mvm);
 
-       ret = iwl_mvm_update_quotas(mvm, NULL);
+       ret = iwl_mvm_update_quotas(mvm, false, NULL);
        if (ret)
                goto out_quota_failed;
 
@@ -2051,7 +2217,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
        if (vif->p2p && mvm->p2p_device_vif)
                iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
 
-       iwl_mvm_update_quotas(mvm, NULL);
+       iwl_mvm_update_quotas(mvm, false, NULL);
        iwl_mvm_send_rm_bcast_sta(mvm, vif);
        iwl_mvm_binding_remove_vif(mvm, vif);
 
@@ -2190,6 +2356,12 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+               IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
+               ret = -EBUSY;
+               goto out;
+       }
+
        if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
                ret = -EBUSY;
                goto out;
@@ -2199,10 +2371,8 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
 
        if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
                ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
-       else if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
-               ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
        else
-               ret = iwl_mvm_scan_request(mvm, vif, req);
+               ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
 
        if (ret)
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
@@ -2272,25 +2442,35 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       unsigned long txqs = 0, tids = 0;
        int tid;
 
+       spin_lock_bh(&mvmsta->lock);
+       for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+               struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+               if (tid_data->state != IWL_AGG_ON &&
+                   tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
+                       continue;
+
+               __set_bit(tid_data->txq_id, &txqs);
+
+               if (iwl_mvm_tid_queued(tid_data) == 0)
+                       continue;
+
+               __set_bit(tid, &tids);
+       }
+
        switch (cmd) {
        case STA_NOTIFY_SLEEP:
                if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
                        ieee80211_sta_block_awake(hw, sta, true);
-               spin_lock_bh(&mvmsta->lock);
-               for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
-                       struct iwl_mvm_tid_data *tid_data;
 
-                       tid_data = &mvmsta->tid_data[tid];
-                       if (tid_data->state != IWL_AGG_ON &&
-                           tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
-                               continue;
-                       if (iwl_mvm_tid_queued(tid_data) == 0)
-                               continue;
+               for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
                        ieee80211_sta_set_buffered(sta, tid, true);
-               }
-               spin_unlock_bh(&mvmsta->lock);
+
+               if (txqs)
+                       iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
                /*
                 * The fw updates the STA to be asleep. Tx packets on the Tx
                 * queues to this station will not be transmitted. The fw will
@@ -2300,11 +2480,15 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
        case STA_NOTIFY_AWAKE:
                if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
                        break;
+
+               if (txqs)
+                       iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
                iwl_mvm_sta_modify_ps_wake(mvm, sta);
                break;
        default:
                break;
        }
+       spin_unlock_bh(&mvmsta->lock);
 }
 
 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
@@ -2542,13 +2726,13 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
-       /* Newest FW fixes sched scan while connected on another interface */
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
-               if (!vif->bss_conf.idle) {
-                       ret = -EBUSY;
-                       goto out;
-               }
-       } else if (!iwl_mvm_is_idle(mvm)) {
+       if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+               IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
+               ret = -EBUSY;
+               goto out;
+       }
+
+       if (!vif->bss_conf.idle) {
                ret = -EBUSY;
                goto out;
        }
@@ -3109,14 +3293,14 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
         */
        if (vif->type == NL80211_IFTYPE_MONITOR) {
                mvmvif->monitor_active = true;
-               ret = iwl_mvm_update_quotas(mvm, NULL);
+               ret = iwl_mvm_update_quotas(mvm, false, NULL);
                if (ret)
                        goto out_remove_binding;
        }
 
        /* Handle binding during CSA */
        if (vif->type == NL80211_IFTYPE_AP) {
-               iwl_mvm_update_quotas(mvm, NULL);
+               iwl_mvm_update_quotas(mvm, false, NULL);
                iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
        }
 
@@ -3140,7 +3324,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
 
                iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
 
-               iwl_mvm_update_quotas(mvm, NULL);
+               iwl_mvm_update_quotas(mvm, false, NULL);
        }
 
        goto out;
@@ -3213,7 +3397,7 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
                break;
        }
 
-       iwl_mvm_update_quotas(mvm, disabled_vif);
+       iwl_mvm_update_quotas(mvm, false, disabled_vif);
        iwl_mvm_binding_remove_vif(mvm, vif);
 
 out:
@@ -3405,7 +3589,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
                mvm->noa_duration = noa_duration;
                mvm->noa_vif = vif;
 
-               return iwl_mvm_update_quotas(mvm, NULL);
+               return iwl_mvm_update_quotas(mvm, false, NULL);
        case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
                /* must be associated client vif - ignore authorized */
                if (!vif || vif->type != NL80211_IFTYPE_STATION ||
@@ -3465,6 +3649,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
                           chsw->chandef.center_freq1);
 
+       iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH,
+                                      NULL, 0);
+
        switch (vif->type) {
        case NL80211_IFTYPE_AP:
                csa_vif =
@@ -3613,6 +3800,95 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
        }
 }
 
+static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
+                                 struct survey_info *survey)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       memset(survey, 0, sizeof(*survey));
+
+       /* only support global statistics right now */
+       if (idx != 0)
+               return -ENOENT;
+
+       if (!(mvm->fw->ucode_capa.capa[0] &
+                       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+               return -ENOENT;
+
+       mutex_lock(&mvm->mutex);
+
+       if (mvm->ucode_loaded) {
+               ret = iwl_mvm_request_statistics(mvm, false);
+               if (ret)
+                       goto out;
+       }
+
+       survey->filled = SURVEY_INFO_TIME |
+                        SURVEY_INFO_TIME_RX |
+                        SURVEY_INFO_TIME_TX |
+                        SURVEY_INFO_TIME_SCAN;
+       survey->time = mvm->accu_radio_stats.on_time_rf +
+                      mvm->radio_stats.on_time_rf;
+       do_div(survey->time, USEC_PER_MSEC);
+
+       survey->time_rx = mvm->accu_radio_stats.rx_time +
+                         mvm->radio_stats.rx_time;
+       do_div(survey->time_rx, USEC_PER_MSEC);
+
+       survey->time_tx = mvm->accu_radio_stats.tx_time +
+                         mvm->radio_stats.tx_time;
+       do_div(survey->time_tx, USEC_PER_MSEC);
+
+       survey->time_scan = mvm->accu_radio_stats.on_time_scan +
+                           mvm->radio_stats.on_time_scan;
+       do_div(survey->time_scan, USEC_PER_MSEC);
+
+ out:
+       mutex_unlock(&mvm->mutex);
+       return ret;
+}
+
+static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_sta *sta,
+                                      struct station_info *sinfo)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+       if (!(mvm->fw->ucode_capa.capa[0] &
+                               IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+               return;
+
+       /* if beacon filtering isn't on mac80211 does it anyway */
+       if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+               return;
+
+       if (!vif->bss_conf.assoc)
+               return;
+
+       mutex_lock(&mvm->mutex);
+
+       if (mvmvif->ap_sta_id != mvmsta->sta_id)
+               goto unlock;
+
+       if (iwl_mvm_request_statistics(mvm, false))
+               goto unlock;
+
+       sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
+                          mvmvif->beacon_stats.accu_num_beacons;
+       sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+       if (mvmvif->beacon_stats.avg_signal) {
+               /* firmware only reports a value after RXing a few beacons */
+               sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
+               sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+       }
+ unlock:
+       mutex_unlock(&mvm->mutex);
+}
+
 const struct ieee80211_ops iwl_mvm_hw_ops = {
        .tx = iwl_mvm_mac_tx,
        .ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -3679,4 +3955,6 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
 #endif
        .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
 #endif
+       .get_survey = iwl_mvm_mac_get_survey,
+       .sta_statistics = iwl_mvm_mac_sta_statistics,
 };
index 6c69d0584f6c880b917337d3ae011ebbac94a9e2..4b5c8f66df8baa865c7f6d0f8f3f885c9193ceef 100644 (file)
@@ -75,6 +75,7 @@
 #include "iwl-trans.h"
 #include "iwl-notif-wait.h"
 #include "iwl-eeprom-parse.h"
+#include "iwl-fw-file.h"
 #include "sta.h"
 #include "fw-api.h"
 #include "constants.h"
@@ -145,6 +146,19 @@ struct iwl_mvm_dump_ptrs {
        u32 op_mode_len;
 };
 
+/**
+ * struct iwl_mvm_dump_desc - describes the dump
+ * @len: length of trig_desc->data
+ * @trig_desc: the description of the dump
+ */
+struct iwl_mvm_dump_desc {
+       size_t len;
+       /* must be last */
+       struct iwl_fw_error_dump_trigger_desc trig_desc;
+};
+
+extern struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert;
+
 struct iwl_mvm_phy_ctxt {
        u16 id;
        u16 color;
@@ -337,8 +351,12 @@ struct iwl_mvm_vif_bf_data {
  * @beacon_skb: the skb used to hold the AP/GO beacon template
  * @smps_requests: the SMPS requests of differents parts of the driver,
  *     combined on update to yield the overall request to mac80211.
+ * @beacon_stats: beacon statistics, containing the # of received beacons,
+ *     # of received beacons accumulated over FW restart, and the current
+ *     average signal of beacons retrieved from the firmware
  */
 struct iwl_mvm_vif {
+       struct iwl_mvm *mvm;
        u16 id;
        u16 color;
        u8 ap_sta_id;
@@ -354,6 +372,11 @@ struct iwl_mvm_vif {
        bool ps_disabled;
        struct iwl_mvm_vif_bf_data bf_data;
 
+       struct {
+               u32 num_beacons, accu_num_beacons;
+               u8 avg_signal;
+       } beacon_stats;
+
        u32 ap_beacon_time;
 
        enum iwl_tsf_id tsf_id;
@@ -396,7 +419,6 @@ struct iwl_mvm_vif {
 #endif
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-       struct iwl_mvm *mvm;
        struct dentry *dbgfs_dir;
        struct dentry *dbgfs_slink;
        struct iwl_dbgfs_pm dbgfs_pm;
@@ -593,6 +615,13 @@ struct iwl_mvm {
 
        struct mvm_statistics_rx rx_stats;
 
+       struct {
+               u64 rx_time;
+               u64 tx_time;
+               u64 on_time_rf;
+               u64 on_time_scan;
+       } radio_stats, accu_radio_stats;
+
        u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
        atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
 
@@ -666,6 +695,7 @@ struct iwl_mvm {
 
        struct iwl_mvm_frame_stats drv_rx_stats;
        spinlock_t drv_stats_lock;
+       u16 dbgfs_rx_phyinfo;
 #endif
 
        struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -687,8 +717,9 @@ struct iwl_mvm {
 
        /* -1 for always, 0 for never, >0 for that many times */
        s8 restart_fw;
-       struct work_struct fw_error_dump_wk;
-       enum iwl_fw_dbg_conf fw_dbg_conf;
+       u8 fw_dbg_conf;
+       struct delayed_work fw_dump_wk;
+       struct iwl_mvm_dump_desc *fw_dump_desc;
 
 #ifdef CONFIG_IWLWIFI_LEDS
        struct led_classdev led;
@@ -779,6 +810,9 @@ struct iwl_mvm {
        /* system time of last beacon (for AP/GO interface) */
        u32 ap_last_beacon_gp2;
 
+       bool lar_regdom_set;
+       enum iwl_mcc_source mcc_src;
+
        u8 low_latency_agg_frame_limit;
 
        /* TDLS channel switch data */
@@ -824,6 +858,7 @@ enum iwl_mvm_status {
        IWL_MVM_STATUS_IN_D0I3,
        IWL_MVM_STATUS_ROC_AUX_RUNNING,
        IWL_MVM_STATUS_D3_RECONFIG,
+       IWL_MVM_STATUS_DUMPING_FW_LOG,
 };
 
 static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -878,11 +913,47 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
               (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
 }
 
+static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
+{
+       bool nvm_lar = mvm->nvm_data->lar_enabled;
+       bool tlv_lar = mvm->fw->ucode_capa.capa[0] &
+               IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+
+       if (iwlwifi_mod_params.lar_disable)
+               return false;
+
+       /*
+        * Enable LAR only if it is supported by the FW (TLV) &&
+        * enabled in the NVM
+        */
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               return nvm_lar && tlv_lar;
+       else
+               return tlv_lar;
+}
+
+static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
+{
+       return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE;
+}
+
 static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
 {
        return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
 }
 
+static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
+{
+       return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
+               IWL_MVM_BT_COEX_CORUNNING;
+}
+
+static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
+{
+       return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
+               IWL_MVM_BT_COEX_RRC;
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -951,12 +1022,13 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
 }
 
 /* Statistics */
-int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd);
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+                                 struct iwl_rx_packet *pkt);
 int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
                          struct iwl_rx_cmd_buffer *rxb,
                          struct iwl_device_cmd *cmd);
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
 
 /* NVM */
 int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
@@ -1067,18 +1139,11 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 
 /* Quota management */
-int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
                          struct ieee80211_vif *disabled_vif);
 
 /* Scanning */
 int iwl_mvm_scan_size(struct iwl_mvm *mvm);
-int iwl_mvm_scan_request(struct iwl_mvm *mvm,
-                        struct ieee80211_vif *vif,
-                        struct cfg80211_scan_request *req);
-int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                            struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                            struct iwl_device_cmd *cmd);
 int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
 
@@ -1089,14 +1154,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
 int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
                                                struct iwl_rx_cmd_buffer *rxb,
                                                struct iwl_device_cmd *cmd);
-int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
-                             struct ieee80211_vif *vif,
-                             struct cfg80211_sched_scan_request *req,
-                             struct ieee80211_scan_ies *ies);
 int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
                                       struct cfg80211_sched_scan_request *req);
-int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
-                            struct cfg80211_sched_scan_request *req);
 int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
                               struct ieee80211_vif *vif,
                               struct cfg80211_sched_scan_request *req,
@@ -1225,7 +1284,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
                             struct iwl_rx_cmd_buffer *rxb,
                             struct iwl_device_cmd *cmd);
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                          enum ieee80211_rssi_event rssi_event);
+                          enum ieee80211_rssi_event_data);
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
 u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
                                struct ieee80211_sta *sta);
@@ -1238,7 +1297,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac);
 
-bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant);
 bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
 void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
 int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
@@ -1246,7 +1304,7 @@ int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
                                 struct iwl_rx_cmd_buffer *rxb,
                                 struct iwl_device_cmd *cmd);
 void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                              enum ieee80211_rssi_event rssi_event);
+                              enum ieee80211_rssi_event_data);
 u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
                                    struct ieee80211_sta *sta);
 bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
@@ -1257,17 +1315,6 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
                                      struct iwl_rx_cmd_buffer *rxb,
                                      struct iwl_device_cmd *cmd);
 
-enum iwl_bt_kill_msk {
-       BT_KILL_MSK_DEFAULT,
-       BT_KILL_MSK_NEVER,
-       BT_KILL_MSK_ALWAYS,
-       BT_KILL_MSK_MAX,
-};
-
-extern const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
-extern const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
-extern const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX];
-
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void
@@ -1352,9 +1399,6 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
        iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout);
 }
 
-/* Assoc status */
-bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
-
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
@@ -1367,6 +1411,23 @@ void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
 int iwl_mvm_get_temp(struct iwl_mvm *mvm);
 
+/* Location Aware Regulatory */
+struct iwl_mcc_update_resp *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+                  enum iwl_mcc_source src_id);
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
+int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+                              struct iwl_rx_cmd_buffer *rxb,
+                              struct iwl_device_cmd *cmd);
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+                                                 const char *alpha2,
+                                                 enum iwl_mcc_source src_id,
+                                                 bool *changed);
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+                                                         bool *changed);
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm);
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
+
 /* smart fifo */
 int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                      bool added_vif);
@@ -1405,7 +1466,62 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
 
-int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf id);
-void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm);
+int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
+                          const char *str, size_t len, unsigned int delay);
+int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
+                               struct iwl_mvm_dump_desc *desc,
+                               unsigned int delay);
+void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
+int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
+                               struct iwl_fw_dbg_trigger_tlv *trigger,
+                               const char *str, size_t len);
+
+static inline bool
+iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
+                            struct ieee80211_vif *vif)
+{
+       u32 trig_vif = le32_to_cpu(trig->vif_type);
+
+       return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
+}
+
+static inline bool
+iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm,
+                                  struct iwl_fw_dbg_trigger_tlv *trig)
+{
+       return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) &&
+               (mvm->fw_dbg_conf == FW_DBG_INVALID ||
+               (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids))));
+}
+
+static inline bool
+iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif,
+                             struct iwl_fw_dbg_trigger_tlv *trig)
+{
+       if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif))
+               return false;
+
+       return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig);
+}
+
+static inline void
+iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm,
+                              struct ieee80211_vif *vif,
+                              enum iwl_fw_dbg_trigger trig,
+                              const char *str, size_t len)
+{
+       struct iwl_fw_dbg_trigger_tlv *trigger;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, trig))
+               return;
+
+       trigger = iwl_fw_dbg_get_trigger(mvm->fw, trig);
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
+               return;
+
+       iwl_mvm_fw_dbg_collect_trig(mvm, trigger, str, len);
+}
 
 #endif /* __IWL_MVM_H__ */
index 5383429d96c1c49f91539c8440b6016cee9dd31c..123e0a16aea88d185656b160c241763fd8b91d1b 100644 (file)
  *
  *****************************************************************************/
 #include <linux/firmware.h>
+#include <linux/rtnetlink.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
 #include "iwl-trans.h"
 #include "iwl-csr.h"
 #include "mvm.h"
 #include "iwl-eeprom-parse.h"
 #include "iwl-eeprom-read.h"
 #include "iwl-nvm-parse.h"
+#include "iwl-prph.h"
 
 /* Default NVM size to read */
 #define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
@@ -262,7 +266,9 @@ static struct iwl_nvm_data *
 iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 {
        struct iwl_nvm_section *sections = mvm->nvm_sections;
-       const __le16 *hw, *sw, *calib, *regulatory, *mac_override;
+       const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
+       bool is_family_8000_a_step = false, lar_enabled;
+       u32 mac_addr0, mac_addr1;
 
        /* Checking for required sections */
        if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
@@ -286,22 +292,43 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                                "Can't parse mac_address, empty sections\n");
                        return NULL;
                }
+
+               if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP)
+                       is_family_8000_a_step = true;
+
+               /* PHY_SKU section is mandatory in B0 */
+               if (!is_family_8000_a_step &&
+                   !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
+                       IWL_ERR(mvm,
+                               "Can't parse phy_sku in B0, empty sections\n");
+                       return NULL;
+               }
        }
 
        if (WARN_ON(!mvm->cfg))
                return NULL;
 
+       /* read the mac address from WFMP registers */
+       mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0);
+       mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1);
+
        hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
        sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
        calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
        regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
        mac_override =
                (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+       phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
+
+       lar_enabled = !iwlwifi_mod_params.lar_disable &&
+                     (mvm->fw->ucode_capa.capa[0] &
+                      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
        return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
-                                 regulatory, mac_override,
-                                 mvm->fw->valid_tx_ant,
-                                 mvm->fw->valid_rx_ant);
+                                 regulatory, mac_override, phy_sku,
+                                 mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
+                                 lar_enabled, is_family_8000_a_step,
+                                 mac_addr0, mac_addr1);
 }
 
 #define MAX_NVM_FILE_LEN       16384
@@ -570,3 +597,258 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
 
        return 0;
 }
+
+struct iwl_mcc_update_resp *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+                  enum iwl_mcc_source src_id)
+{
+       struct iwl_mcc_update_cmd mcc_update_cmd = {
+               .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
+               .source_id = (u8)src_id,
+       };
+       struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL;
+       struct iwl_rx_packet *pkt;
+       struct iwl_host_cmd cmd = {
+               .id = MCC_UPDATE_CMD,
+               .flags = CMD_WANT_SKB,
+               .data = { &mcc_update_cmd },
+       };
+
+       int ret;
+       u32 status;
+       int resp_len, n_channels;
+       u16 mcc;
+
+       if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+               return ERR_PTR(-EOPNOTSUPP);
+
+       cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
+
+       IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
+                     alpha2[0], alpha2[1], src_id);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret)
+               return ERR_PTR(ret);
+
+       pkt = cmd.resp_pkt;
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
+                       pkt->hdr.flags);
+               ret = -EIO;
+               goto exit;
+       }
+
+       /* Extract MCC response */
+       mcc_resp = (void *)pkt->data;
+       status = le32_to_cpu(mcc_resp->status);
+
+       mcc = le16_to_cpu(mcc_resp->mcc);
+
+       /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
+       if (mcc == 0) {
+               mcc = 0x3030;  /* "00" - world */
+               mcc_resp->mcc = cpu_to_le16(mcc);
+       }
+
+       n_channels =  __le32_to_cpu(mcc_resp->n_channels);
+       IWL_DEBUG_LAR(mvm,
+                     "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
+                     status, mcc, mcc >> 8, mcc & 0xff,
+                     !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
+
+       resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32);
+       resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
+       if (!resp_cp) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       ret = 0;
+exit:
+       iwl_free_resp(&cmd);
+       if (ret)
+               return ERR_PTR(ret);
+       return resp_cp;
+}
+
+#ifdef CONFIG_ACPI
+#define WRD_METHOD             "WRDD"
+#define WRDD_WIFI              (0x07)
+#define WRDD_WIGIG             (0x10)
+
+static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd)
+{
+       union acpi_object *mcc_pkg, *domain_type, *mcc_value;
+       u32 i;
+
+       if (wrdd->type != ACPI_TYPE_PACKAGE ||
+           wrdd->package.count < 2 ||
+           wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
+           wrdd->package.elements[0].integer.value != 0) {
+               IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n");
+               return 0;
+       }
+
+       for (i = 1 ; i < wrdd->package.count ; ++i) {
+               mcc_pkg = &wrdd->package.elements[i];
+
+               if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
+                   mcc_pkg->package.count < 2 ||
+                   mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+                   mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+                       mcc_pkg = NULL;
+                       continue;
+               }
+
+               domain_type = &mcc_pkg->package.elements[0];
+               if (domain_type->integer.value == WRDD_WIFI)
+                       break;
+
+               mcc_pkg = NULL;
+       }
+
+       if (mcc_pkg) {
+               mcc_value = &mcc_pkg->package.elements[1];
+               return mcc_value->integer.value;
+       }
+
+       return 0;
+}
+
+static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
+{
+       acpi_handle root_handle;
+       acpi_handle handle;
+       struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
+       acpi_status status;
+       u32 mcc_val;
+       struct pci_dev *pdev = to_pci_dev(mvm->dev);
+
+       root_handle = ACPI_HANDLE(&pdev->dev);
+       if (!root_handle) {
+               IWL_DEBUG_LAR(mvm,
+                             "Could not retrieve root port ACPI handle\n");
+               return -ENOENT;
+       }
+
+       /* Get the method's handle */
+       status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
+       if (ACPI_FAILURE(status)) {
+               IWL_DEBUG_LAR(mvm, "WRD method not found\n");
+               return -ENOENT;
+       }
+
+       /* Call WRDD with no arguments */
+       status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
+       if (ACPI_FAILURE(status)) {
+               IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status);
+               return -ENOENT;
+       }
+
+       mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer);
+       kfree(wrdd.pointer);
+       if (!mcc_val)
+               return -ENOENT;
+
+       mcc[0] = (mcc_val >> 8) & 0xff;
+       mcc[1] = mcc_val & 0xff;
+       mcc[2] = '\0';
+       return 0;
+}
+#else /* CONFIG_ACPI */
+static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
+{
+       return -ENOENT;
+}
+#endif
+
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
+{
+       bool tlv_lar;
+       bool nvm_lar;
+       int retval;
+       struct ieee80211_regdomain *regd;
+       char mcc[3];
+
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+               tlv_lar = mvm->fw->ucode_capa.capa[0] &
+                       IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+               nvm_lar = mvm->nvm_data->lar_enabled;
+               if (tlv_lar != nvm_lar)
+                       IWL_INFO(mvm,
+                                "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
+                                tlv_lar ? "enabled" : "disabled",
+                                nvm_lar ? "enabled" : "disabled");
+       }
+
+       if (!iwl_mvm_is_lar_supported(mvm))
+               return 0;
+
+       /*
+        * During HW restart, only replay the last set MCC to FW. Otherwise,
+        * queue an update to cfg80211 to retrieve the default alpha2 from FW.
+        */
+       if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+               /* This should only be called during vif up and hold RTNL */
+               return iwl_mvm_init_fw_regd(mvm);
+       }
+
+       /*
+        * Driver regulatory hint for initial update, this also informs the
+        * firmware we support wifi location updates.
+        * Disallow scans that might crash the FW while the LAR regdomain
+        * is not set.
+        */
+       mvm->lar_regdom_set = false;
+
+       regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+       if (IS_ERR_OR_NULL(regd))
+               return -EIO;
+
+       if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
+           !iwl_mvm_get_bios_mcc(mvm, mcc)) {
+               kfree(regd);
+               regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
+                                            MCC_SOURCE_BIOS, NULL);
+               if (IS_ERR_OR_NULL(regd))
+                       return -EIO;
+       }
+
+       retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+       kfree(regd);
+       return retval;
+}
+
+int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+                              struct iwl_rx_cmd_buffer *rxb,
+                              struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
+       enum iwl_mcc_source src;
+       char mcc[3];
+       struct ieee80211_regdomain *regd;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+               return 0;
+
+       mcc[0] = notif->mcc >> 8;
+       mcc[1] = notif->mcc & 0xff;
+       mcc[2] = '\0';
+       src = notif->source_id;
+
+       IWL_DEBUG_LAR(mvm,
+                     "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
+                     mcc, src);
+       regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
+       if (IS_ERR_OR_NULL(regd))
+               return 0;
+
+       regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+       kfree(regd);
+
+       return 0;
+}
index 2dffc3600ed3faac6cf2777cc7a1f862c29e7b24..80121e41ca22f3aca2a09c59ba30016e9872caae 100644 (file)
@@ -82,7 +82,6 @@
 #include "rs.h"
 #include "fw-api-scan.h"
 #include "time-event.h"
-#include "iwl-fw-error-dump.h"
 
 #define DRV_DESCRIPTION        "The new Intel(R) wireless AGN driver for Linux"
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -234,11 +233,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
                   iwl_mvm_rx_ant_coupling_notif, true),
 
        RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
+       RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),
 
        RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
 
-       RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
-       RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
        RX_HANDLER(SCAN_ITERATION_COMPLETE,
                   iwl_mvm_rx_scan_offload_iter_complete_notif, false),
        RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
@@ -311,6 +309,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(REPLY_RX_MPDU_CMD),
        CMD(BEACON_NOTIFICATION),
        CMD(BEACON_TEMPLATE_CMD),
+       CMD(STATISTICS_CMD),
        CMD(STATISTICS_NOTIFICATION),
        CMD(EOSP_NOTIFICATION),
        CMD(REDUCE_TX_POWER_CMD),
@@ -359,6 +358,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(TDLS_CHANNEL_SWITCH_CMD),
        CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
        CMD(TDLS_CONFIG_CMD),
+       CMD(MCC_UPDATE_CMD),
 };
 #undef CMD
 
@@ -456,7 +456,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
        INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
        INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
-       INIT_WORK(&mvm->fw_error_dump_wk, iwl_mvm_fw_error_dump_wk);
+       INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
        INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
 
        spin_lock_init(&mvm->d0i3_tx_lock);
@@ -504,6 +504,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
        memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
               sizeof(trans->dbg_conf_tlv));
+       trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
 
        /* set up notification wait support */
        iwl_notification_wait_init(&mvm->notif_wait);
@@ -685,6 +686,38 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
        mutex_unlock(&mvm->mutex);
 }
 
+static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
+                                           struct iwl_rx_packet *pkt)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_cmd *cmds_trig;
+       char buf[32];
+       int i;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
+       cmds_trig = (void *)trig->data;
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
+               /* don't collect on CMD 0 */
+               if (!cmds_trig->cmds[i].cmd_id)
+                       break;
+
+               if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
+                       continue;
+
+               memset(buf, 0, sizeof(buf));
+               snprintf(buf, sizeof(buf), "CMD 0x%02x received", pkt->hdr.cmd);
+               iwl_mvm_fw_dbg_collect_trig(mvm, trig, buf, sizeof(buf));
+               break;
+       }
+}
+
 static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
                               struct iwl_rx_cmd_buffer *rxb,
                               struct iwl_device_cmd *cmd)
@@ -693,6 +726,8 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        u8 i;
 
+       iwl_mvm_rx_check_trigger(mvm, pkt);
+
        /*
         * Do the notification wait before RX handlers so
         * even if the RX handler consumes the RXB we have
@@ -827,7 +862,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
 static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
 {
        struct iwl_mvm *mvm =
-               container_of(work, struct iwl_mvm, fw_error_dump_wk);
+               container_of(work, struct iwl_mvm, fw_dump_wk.work);
 
        if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
                return;
@@ -837,8 +872,8 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
 
        /* start recording again if the firmware is not crashed */
        WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
-                     mvm->fw->dbg_dest_tlv &&
-                     iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
+                    mvm->fw->dbg_dest_tlv &&
+                    iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
 
        mutex_unlock(&mvm->mutex);
 
@@ -879,7 +914,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         * can't recover this since we're already half suspended.
         */
        if (!mvm->restart_fw && fw_error) {
-               schedule_work(&mvm->fw_error_dump_wk);
+               iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
        } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
                                    &mvm->status)) {
                struct iwl_mvm_reprobe *reprobe;
@@ -1236,6 +1271,10 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
        iwl_free_resp(&get_status_cmd);
 out:
        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+
+       /* the FW might have updated the regdomain */
+       iwl_mvm_update_changed_regdom(mvm);
+
        iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
        mutex_unlock(&mvm->mutex);
 }
index 5b43616eeb06332dedd68299650583ec292b201f..192b74bc8cf67270a7db805f846f16abf7a23081 100644 (file)
@@ -175,6 +175,10 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
        cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
        cmd->rxchain_info |= cpu_to_le32(active_cnt <<
                                         PHY_RX_CHAIN_MIMO_CNT_POS);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (unlikely(mvm->dbgfs_rx_phyinfo))
+               cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+#endif
 
        cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
 }
index 2620dd0c45f9638c949fd34b7482533b13c33126..d2c6ba9d326b4656b8f6e7007554fb3a5ba8e681 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/etherdevice.h>
 
 #include <net/mac80211.h>
 
@@ -357,7 +358,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
        if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
-           !mvmvif->pm_enabled || iwl_mvm_tdls_sta_count(mvm, vif))
+           !mvmvif->pm_enabled)
                return;
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -491,7 +492,7 @@ void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid,
                   ETH_ALEN))
-               memset(mvmvif->uapsd_misbehaving_bssid, 0, ETH_ALEN);
+               eth_zero_addr(mvmvif->uapsd_misbehaving_bssid);
 }
 
 static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
@@ -638,6 +639,10 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
        if (vifs->ap_vif)
                ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
 
+       /* don't allow PM if any TDLS stations exist */
+       if (iwl_mvm_tdls_sta_count(mvm, NULL))
+               return;
+
        /* enable PM on bss if bss stand alone */
        if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
                bss_mvmvif->pm_enabled = true;
index dbb2594390e96195c194f9c9ebe8d0967b3924b7..509a66d05245bb37ab9deb7391530297181be0c7 100644 (file)
@@ -172,6 +172,7 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
 }
 
 int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
+                         bool force_update,
                          struct ieee80211_vif *disabled_vif)
 {
        struct iwl_time_quota_cmd cmd = {};
@@ -309,7 +310,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
                          "zero quota on binding %d\n", i);
        }
 
-       if (!send) {
+       if (!send && !force_update) {
                /* don't send a practically unchanged command, the firmware has
                 * to re-initialize a lot of state and that can have an adverse
                 * impact on it
index 078f24cf4af3927c66e2540bc6cc6404d0c36fc7..9140b0b701c75cd316c8d1063fdc1e4286339f7c 100644 (file)
@@ -160,6 +160,9 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                          struct iwl_scale_tbl_info *tbl,
                          const struct rs_tx_column *next_col)
 {
+       struct iwl_mvm_sta *mvmsta;
+       struct iwl_mvm_vif *mvmvif;
+
        if (!sta->ht_cap.ht_supported)
                return false;
 
@@ -172,6 +175,11 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
                return false;
 
+       mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+       if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
+               return false;
+
        return true;
 }
 
@@ -807,6 +815,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
                rate->ldpc = true;
        if (ucode_rate & RATE_MCS_VHT_STBC_MSK)
                rate->stbc = true;
+       if (ucode_rate & RATE_MCS_BF_MSK)
+               rate->bfer = true;
 
        rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
 
@@ -816,7 +826,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
 
                if (nss == 1) {
                        rate->type = LQ_HT_SISO;
-                       WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
+                       WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+                                 "stbc %d bfer %d",
+                                 rate->stbc, rate->bfer);
                } else if (nss == 2) {
                        rate->type = LQ_HT_MIMO2;
                        WARN_ON_ONCE(num_of_ant != 2);
@@ -829,7 +841,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
 
                if (nss == 1) {
                        rate->type = LQ_VHT_SISO;
-                       WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
+                       WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+                                 "stbc %d bfer %d",
+                                 rate->stbc, rate->bfer);
                } else if (nss == 2) {
                        rate->type = LQ_VHT_MIMO2;
                        WARN_ON_ONCE(num_of_ant != 2);
@@ -1008,13 +1022,41 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
                rs_get_lower_rate_in_column(lq_sta, rate);
 }
 
-/* Simple function to compare two rate scale table types */
-static inline bool rs_rate_match(struct rs_rate *a,
-                                struct rs_rate *b)
+/* Check if both rates are identical
+ * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
+ * with a rate indicating STBC/BFER and ANT_AB.
+ */
+static inline bool rs_rate_equal(struct rs_rate *a,
+                                struct rs_rate *b,
+                                bool allow_ant_mismatch)
+
+{
+       bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
+               (a->bfer == b->bfer);
+
+       if (allow_ant_mismatch) {
+               if (a->stbc || a->bfer) {
+                       WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
+                                 a->stbc, a->bfer, a->ant);
+                       ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
+               } else if (b->stbc || b->bfer) {
+                       WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
+                                 b->stbc, b->bfer, b->ant);
+                       ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
+               }
+       }
+
+       return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
+               (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
+}
+
+/* Check if both rates share the same column */
+static inline bool rs_rate_column_match(struct rs_rate *a,
+                                       struct rs_rate *b)
 {
        bool ant_match;
 
-       if (a->stbc)
+       if (a->stbc || a->bfer)
                ant_match = (b->ant == ANT_A || b->ant == ANT_B);
        else
                ant_match = (a->ant == b->ant);
@@ -1023,16 +1065,35 @@ static inline bool rs_rate_match(struct rs_rate *a,
                && ant_match;
 }
 
-static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
+static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate)
 {
-       if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-               return RATE_MCS_CHAN_WIDTH_40;
-       else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
-               return RATE_MCS_CHAN_WIDTH_80;
-       else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
-               return RATE_MCS_CHAN_WIDTH_160;
+       if (is_legacy(rate)) {
+               if (rate->ant == ANT_A)
+                       return RS_COLUMN_LEGACY_ANT_A;
 
-       return RATE_MCS_CHAN_WIDTH_20;
+               if (rate->ant == ANT_B)
+                       return RS_COLUMN_LEGACY_ANT_B;
+
+               goto err;
+       }
+
+       if (is_siso(rate)) {
+               if (rate->ant == ANT_A || rate->stbc || rate->bfer)
+                       return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI :
+                               RS_COLUMN_SISO_ANT_A;
+
+               if (rate->ant == ANT_B)
+                       return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI :
+                               RS_COLUMN_SISO_ANT_B;
+
+               goto err;
+       }
+
+       if (is_mimo(rate))
+               return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2;
+
+err:
+       return RS_COLUMN_INVALID;
 }
 
 static u8 rs_get_tid(struct ieee80211_hdr *hdr)
@@ -1055,15 +1116,17 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 {
        int legacy_success;
        int retries;
-       int mac_index, i;
+       int i;
        struct iwl_lq_cmd *table;
-       enum mac80211_rate_control_flags mac_flags;
-       u32 ucode_rate;
-       struct rs_rate rate;
+       u32 lq_hwrate;
+       struct rs_rate lq_rate, tx_resp_rate;
        struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
        u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
+       u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+       bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] &
+               IWL_UCODE_TLV_API_LQ_SS_PARAMS;
 
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
@@ -1074,50 +1137,43 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                return;
        }
 
-#ifdef CONFIG_MAC80211_DEBUGFS
-       /* Disable last tx check if we are debugging with fixed rate */
-       if (lq_sta->pers.dbg_fixed_rate) {
-               IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
-               return;
-       }
-#endif
        /* This packet was aggregated but doesn't carry status info */
        if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
            !(info->flags & IEEE80211_TX_STAT_AMPDU))
                return;
 
-       /*
-        * Ignore this Tx frame response if its initial rate doesn't match
-        * that of latest Link Quality command.  There may be stragglers
-        * from a previous Link Quality command, but we're no longer interested
-        * in those; they're either from the "active" mode while we're trying
-        * to check "search" mode, or a prior "search" mode after we've moved
-        * to a new "search" mode (which might become the new "active" mode).
-        */
-       table = &lq_sta->lq;
-       ucode_rate = le32_to_cpu(table->rs_table[0]);
-       rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
-       if (info->band == IEEE80211_BAND_5GHZ)
-               rate.index -= IWL_FIRST_OFDM_RATE;
-       mac_flags = info->status.rates[0].flags;
-       mac_index = info->status.rates[0].idx;
-       /* For HT packets, map MCS to PLCP */
-       if (mac_flags & IEEE80211_TX_RC_MCS) {
-               /* Remove # of streams */
-               mac_index &= RATE_HT_MCS_RATE_CODE_MSK;
-               if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
-                       mac_index++;
-               /*
-                * mac80211 HT index is always zero-indexed; we need to move
-                * HT OFDM rates after CCK rates in 2.4 GHz band
-                */
-               if (info->band == IEEE80211_BAND_2GHZ)
-                       mac_index += IWL_FIRST_OFDM_RATE;
-       } else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) {
-               mac_index &= RATE_VHT_MCS_RATE_CODE_MSK;
-               if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
-                       mac_index++;
+       rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+       /* Disable last tx check if we are debugging with fixed rate but
+        * update tx stats */
+       if (lq_sta->pers.dbg_fixed_rate) {
+               int index = tx_resp_rate.index;
+               enum rs_column column;
+               int attempts, success;
+
+               column = rs_get_column_from_rate(&tx_resp_rate);
+               if (WARN_ONCE(column == RS_COLUMN_INVALID,
+                             "Can't map rate 0x%x to column",
+                             tx_resp_hwrate))
+                       return;
+
+               if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+                       attempts = info->status.ampdu_len;
+                       success = info->status.ampdu_ack_len;
+               } else {
+                       attempts = info->status.rates[0].count;
+                       success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+               }
+
+               lq_sta->pers.tx_stats[column][index].total += attempts;
+               lq_sta->pers.tx_stats[column][index].success += success;
+
+               IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
+                              tx_resp_hwrate, success, attempts);
+               return;
        }
+#endif
 
        if (time_after(jiffies,
                       (unsigned long)(lq_sta->last_tx +
@@ -1133,21 +1189,23 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        }
        lq_sta->last_tx = jiffies;
 
+       /* Ignore this Tx frame response if its initial rate doesn't match
+        * that of latest Link Quality command.  There may be stragglers
+        * from a previous Link Quality command, but we're no longer interested
+        * in those; they're either from the "active" mode while we're trying
+        * to check "search" mode, or a prior "search" mode after we've moved
+        * to a new "search" mode (which might become the new "active" mode).
+        */
+       table = &lq_sta->lq;
+       lq_hwrate = le32_to_cpu(table->rs_table[0]);
+       rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
+
        /* Here we actually compare this rate to the latest LQ command */
-       if ((mac_index < 0) ||
-           (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
-           (rate.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
-           (rate.ant != info->status.antenna) ||
-           (!!(ucode_rate & RATE_MCS_HT_MSK) !=
-            !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
-           (!!(ucode_rate & RATE_MCS_VHT_MSK) !=
-            !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
-           (!!(ucode_rate & RATE_HT_MCS_GF_MSK) !=
-            !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
-           (rate.index != mac_index)) {
+       if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
                IWL_DEBUG_RATE(mvm,
-                              "initial rate %d does not match %d (0x%x)\n",
-                              mac_index, rate.index, ucode_rate);
+                              "initial tx resp rate 0x%x does not match 0x%x\n",
+                              tx_resp_hwrate, lq_hwrate);
+
                /*
                 * Since rates mis-match, the last LQ command may have failed.
                 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
@@ -1175,14 +1233,14 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        }
 
-       if (WARN_ON_ONCE(!rs_rate_match(&rate, &curr_tbl->rate))) {
+       if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
                IWL_DEBUG_RATE(mvm,
                               "Neither active nor search matches tx rate\n");
                tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
                rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
                tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
                rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
-               rs_dump_rate(mvm, &rate, "ACTUAL");
+               rs_dump_rate(mvm, &lq_rate, "ACTUAL");
 
                /*
                 * no matching table found, let's by-pass the data collection
@@ -1207,9 +1265,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                if (info->status.ampdu_ack_len == 0)
                        info->status.ampdu_len = 1;
 
-               ucode_rate = le32_to_cpu(table->rs_table[0]);
-               rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
-               rs_collect_tx_data(mvm, lq_sta, curr_tbl, rate.index,
+               rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index,
                                   info->status.ampdu_len,
                                   info->status.ampdu_ack_len,
                                   reduced_txp);
@@ -1232,21 +1288,23 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
                /* Collect data for each rate used during failed TX attempts */
                for (i = 0; i <= retries; ++i) {
-                       ucode_rate = le32_to_cpu(table->rs_table[i]);
-                       rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
+                       lq_hwrate = le32_to_cpu(table->rs_table[i]);
+                       rs_rate_from_ucode_rate(lq_hwrate, info->band,
+                                               &lq_rate);
                        /*
                         * Only collect stats if retried rate is in the same RS
                         * table as active/search.
                         */
-                       if (rs_rate_match(&rate, &curr_tbl->rate))
+                       if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
                                tmp_tbl = curr_tbl;
-                       else if (rs_rate_match(&rate, &other_tbl->rate))
+                       else if (rs_rate_column_match(&lq_rate,
+                                                     &other_tbl->rate))
                                tmp_tbl = other_tbl;
                        else
                                continue;
 
-                       rs_collect_tx_data(mvm, lq_sta, tmp_tbl, rate.index, 1,
-                                          i < retries ? 0 : legacy_success,
+                       rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index,
+                                          1, i < retries ? 0 : legacy_success,
                                           reduced_txp);
                }
 
@@ -1257,7 +1315,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                }
        }
        /* The last TX rate is cached in lq_sta; it's set in if/else above */
-       lq_sta->last_rate_n_flags = ucode_rate;
+       lq_sta->last_rate_n_flags = lq_hwrate;
        IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
 done:
        /* See if there's a better rate or modulation mode to try. */
@@ -2554,6 +2612,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
 #ifdef CONFIG_MAC80211_DEBUGFS
        lq_sta->pers.dbg_fixed_rate = 0;
        lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID;
+       lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
 #endif
        lq_sta->pers.chains = 0;
        memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
@@ -3079,19 +3138,21 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
        if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
                goto out;
 
+#ifdef CONFIG_MAC80211_DEBUGFS
        /* Check if forcing the decision is configured.
         * Note that SISO is forced by not allowing STBC or BFER
         */
-       if (lq_sta->ss_force == RS_SS_FORCE_STBC)
+       if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC)
                ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
-       else if (lq_sta->ss_force == RS_SS_FORCE_BFER)
+       else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER)
                ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);
 
-       if (lq_sta->ss_force != RS_SS_FORCE_NONE) {
+       if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) {
                IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
-                              lq_sta->ss_force);
+                              lq_sta->pers.ss_force);
                goto out;
        }
+#endif
 
        if (lq_sta->stbc_capable)
                ss_params |= LQ_SS_STBC_1SS_ALLOWED;
@@ -3332,6 +3393,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
        struct iwl_mvm *mvm;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        struct rs_rate *rate = &tbl->rate;
+       u32 ss_params;
        mvm = lq_sta->pers.drv;
        buff = kmalloc(2048, GFP_KERNEL);
        if (!buff)
@@ -3351,16 +3413,16 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
                        (is_legacy(rate)) ? "legacy" :
                        is_vht(rate) ? "VHT" : "HT");
        if (!is_legacy(rate)) {
-               desc += sprintf(buff+desc, " %s",
+               desc += sprintf(buff + desc, " %s",
                   (is_siso(rate)) ? "SISO" : "MIMO2");
-                  desc += sprintf(buff+desc, " %s",
-                                  (is_ht20(rate)) ? "20MHz" :
-                                  (is_ht40(rate)) ? "40MHz" :
-                                  (is_ht80(rate)) ? "80Mhz" : "BAD BW");
-                  desc += sprintf(buff+desc, " %s %s %s\n",
-                                  (rate->sgi) ? "SGI" : "NGI",
-                                  (rate->ldpc) ? "LDPC" : "BCC",
-                                  (lq_sta->is_agg) ? "AGG on" : "");
+               desc += sprintf(buff + desc, " %s",
+                               (is_ht20(rate)) ? "20MHz" :
+                               (is_ht40(rate)) ? "40MHz" :
+                               (is_ht80(rate)) ? "80Mhz" : "BAD BW");
+               desc += sprintf(buff + desc, " %s %s %s\n",
+                               (rate->sgi) ? "SGI" : "NGI",
+                               (rate->ldpc) ? "LDPC" : "BCC",
+                               (lq_sta->is_agg) ? "AGG on" : "");
        }
        desc += sprintf(buff+desc, "last tx rate=0x%X\n",
                        lq_sta->last_rate_n_flags);
@@ -3378,6 +3440,16 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
                        lq_sta->lq.agg_frame_cnt_limit);
 
        desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
+       ss_params = le32_to_cpu(lq_sta->lq.ss_params);
+       desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
+                       (ss_params & LQ_SS_PARAMS_VALID) ?
+                       "VALID" : "INVALID",
+                       (ss_params & LQ_SS_BFER_ALLOWED) ?
+                       ", BFER" : "",
+                       (ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
+                       ", STBC" : "",
+                       (ss_params & LQ_SS_FORCE) ?
+                       ", FORCE" : "");
        desc += sprintf(buff+desc,
                        "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
                        lq_sta->lq.initial_rate_index[0],
@@ -3554,7 +3626,7 @@ static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
        };
 
        pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
-                        ss_force_name[lq_sta->ss_force]);
+                        ss_force_name[lq_sta->pers.ss_force]);
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
@@ -3565,12 +3637,12 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
        int ret = 0;
 
        if (!strncmp("none", buf, 4)) {
-               lq_sta->ss_force = RS_SS_FORCE_NONE;
+               lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
        } else if (!strncmp("siso", buf, 4)) {
-               lq_sta->ss_force = RS_SS_FORCE_SISO;
+               lq_sta->pers.ss_force = RS_SS_FORCE_SISO;
        } else if (!strncmp("stbc", buf, 4)) {
                if (lq_sta->stbc_capable) {
-                       lq_sta->ss_force = RS_SS_FORCE_STBC;
+                       lq_sta->pers.ss_force = RS_SS_FORCE_STBC;
                } else {
                        IWL_ERR(mvm,
                                "can't force STBC. peer doesn't support\n");
@@ -3578,7 +3650,7 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
                }
        } else if (!strncmp("bfer", buf, 4)) {
                if (lq_sta->bfer_capable) {
-                       lq_sta->ss_force = RS_SS_FORCE_BFER;
+                       lq_sta->pers.ss_force = RS_SS_FORCE_BFER;
                } else {
                        IWL_ERR(mvm,
                                "can't force BFER. peer doesn't support\n");
index dc4ef3dfafe192880bf6f42a9c6ab4901b75d861..e4aa9346a23103f4eb660d087d0f7b8542334e42 100644 (file)
@@ -170,6 +170,7 @@ struct rs_rate {
        bool sgi;
        bool ldpc;
        bool stbc;
+       bool bfer;
 };
 
 
@@ -331,14 +332,14 @@ struct iwl_lq_sta {
        /* tx power reduce for this sta */
        int tpc_reduce;
 
-       /* force STBC/BFER/SISO for testing */
-       enum rs_ss_force_opt ss_force;
-
        /* persistent fields - initialized only once - keep last! */
        struct lq_sta_pers {
 #ifdef CONFIG_MAC80211_DEBUGFS
                u32 dbg_fixed_rate;
                u8 dbg_fixed_txp_reduction;
+
+               /* force STBC/BFER/SISO for testing */
+               enum rs_ss_force_opt ss_force;
 #endif
                u8 chains;
                s8 chain_signal[IEEE80211_MAX_CHAINS];
index f922131b4eaba7a66ab8d2e14f749f5d8cb56206..6177e24f4c016d09c8496186d65c394d05bb5eb3 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -345,6 +345,25 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                struct iwl_mvm_sta *mvmsta;
                mvmsta = iwl_mvm_sta_from_mac80211(sta);
                rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+
+               if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
+                   ieee80211_is_beacon(hdr->frame_control)) {
+                       struct iwl_fw_dbg_trigger_tlv *trig;
+                       struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
+                       bool trig_check;
+                       s32 rssi;
+
+                       trig = iwl_fw_dbg_get_trigger(mvm->fw,
+                                                     FW_DBG_TRIGGER_RSSI);
+                       rssi_trig = (void *)trig->data;
+                       rssi = le32_to_cpu(rssi_trig->rssi);
+
+                       trig_check =
+                               iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
+                                                             trig);
+                       if (trig_check && rx_status->signal < rssi)
+                               iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL, 0);
+               }
        }
 
        rcu_read_unlock();
@@ -416,35 +435,43 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 }
 
 static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
-                                        struct iwl_notif_statistics *stats)
+                                        struct mvm_statistics_rx *rx_stats)
 {
-       /*
-        * NOTE FW aggregates the statistics - BUT the statistics are cleared
-        * when the driver issues REPLY_STATISTICS_CMD 0x9c with CLEAR_STATS
-        * bit set.
-        */
        lockdep_assert_held(&mvm->mutex);
-       memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx));
+
+       mvm->rx_stats = *rx_stats;
 }
 
 struct iwl_mvm_stat_data {
-       struct iwl_notif_statistics *stats;
        struct iwl_mvm *mvm;
+       __le32 mac_id;
+       __s8 beacon_filter_average_energy;
+       struct mvm_statistics_general_v8 *general;
 };
 
 static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
                                  struct ieee80211_vif *vif)
 {
        struct iwl_mvm_stat_data *data = _data;
-       struct iwl_notif_statistics *stats = data->stats;
        struct iwl_mvm *mvm = data->mvm;
-       int sig = -stats->general.beacon_filter_average_energy;
+       int sig = -data->beacon_filter_average_energy;
        int last_event;
        int thold = vif->bss_conf.cqm_rssi_thold;
        int hyst = vif->bss_conf.cqm_rssi_hyst;
-       u16 id = le32_to_cpu(stats->rx.general.mac_id);
+       u16 id = le32_to_cpu(data->mac_id);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
+       /* This doesn't need the MAC ID check since it's not taking the
+        * data copied into the "data" struct, but rather the data from
+        * the notification directly.
+        */
+       if (data->general) {
+               mvmvif->beacon_stats.num_beacons =
+                       le32_to_cpu(data->general->beacon_counter[mvmvif->id]);
+               mvmvif->beacon_stats.avg_signal =
+                       -data->general->beacon_average_energy[mvmvif->id];
+       }
+
        if (mvmvif->id != id)
                return;
 
@@ -500,34 +527,101 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
        }
 }
 
-/*
- * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
- *
- * TODO: This handler is implemented partially.
- */
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+static inline void
+iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
 {
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_notif_statistics *stats = (void *)&pkt->data;
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_stats *trig_stats;
+       u32 trig_offset, trig_thold;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
+       trig_stats = (void *)trig->data;
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+               return;
+
+       trig_offset = le32_to_cpu(trig_stats->stop_offset);
+       trig_thold = le32_to_cpu(trig_stats->stop_threshold);
+
+       if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt)))
+               return;
+
+       if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
+               return;
+
+       iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL, 0);
+}
+
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+                                 struct iwl_rx_packet *pkt)
+{
+       size_t v8_len = sizeof(struct iwl_notif_statistics_v8);
+       size_t v10_len = sizeof(struct iwl_notif_statistics_v10);
        struct iwl_mvm_stat_data data = {
-               .stats = stats,
                .mvm = mvm,
        };
+       u32 temperature;
+
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) {
+               struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
+
+               if (iwl_rx_packet_payload_len(pkt) != v10_len)
+                       goto invalid;
+
+               temperature = le32_to_cpu(stats->general.radio_temperature);
+               data.mac_id = stats->rx.general.mac_id;
+               data.beacon_filter_average_energy =
+                       stats->general.beacon_filter_average_energy;
+
+               iwl_mvm_update_rx_statistics(mvm, &stats->rx);
+
+               mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
+               mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
+               mvm->radio_stats.on_time_rf =
+                       le64_to_cpu(stats->general.on_time_rf);
+               mvm->radio_stats.on_time_scan =
+                       le64_to_cpu(stats->general.on_time_scan);
+
+               data.general = &stats->general;
+       } else {
+               struct iwl_notif_statistics_v8 *stats = (void *)&pkt->data;
+
+               if (iwl_rx_packet_payload_len(pkt) != v8_len)
+                       goto invalid;
+
+               temperature = le32_to_cpu(stats->general.radio_temperature);
+               data.mac_id = stats->rx.general.mac_id;
+               data.beacon_filter_average_energy =
+                       stats->general.beacon_filter_average_energy;
+
+               iwl_mvm_update_rx_statistics(mvm, &stats->rx);
+       }
+
+       iwl_mvm_rx_stats_check_trigger(mvm, pkt);
 
        /* Only handle rx statistics temperature changes if async temp
         * notifications are not supported
         */
        if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
-               iwl_mvm_tt_temp_changed(mvm,
-                               le32_to_cpu(stats->general.radio_temperature));
-
-       iwl_mvm_update_rx_statistics(mvm, stats);
+               iwl_mvm_tt_temp_changed(mvm, temperature);
 
        ieee80211_iterate_active_interfaces(mvm->hw,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_stat_iterator,
                                            &data);
+       return;
+ invalid:
+       IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
+               iwl_rx_packet_payload_len(pkt));
+}
+
+int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+                         struct iwl_rx_cmd_buffer *rxb,
+                         struct iwl_device_cmd *cmd)
+{
+       iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
        return 0;
 }
index c47c8051da7770f2a2a89c9b33868151c62f0ff6..a75bb150ea275ef18fd6449e8c1d5543fc466bf6 100644 (file)
@@ -82,6 +82,7 @@ struct iwl_mvm_scan_params {
        struct _dwell {
                u16 passive;
                u16 active;
+               u16 fragmented;
        } dwell[IEEE80211_NUM_BANDS];
 };
 
@@ -191,101 +192,6 @@ static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
        return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
 }
 
-static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
-                                      struct cfg80211_scan_request *req,
-                                      bool basic_ssid,
-                                      struct iwl_mvm_scan_params *params)
-{
-       struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
-               (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
-       int i;
-       int type = BIT(req->n_ssids) - 1;
-       enum ieee80211_band band = req->channels[0]->band;
-
-       if (!basic_ssid)
-               type |= BIT(req->n_ssids);
-
-       for (i = 0; i < cmd->channel_count; i++) {
-               chan->channel = cpu_to_le16(req->channels[i]->hw_value);
-               chan->type = cpu_to_le32(type);
-               if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
-                       chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
-               chan->active_dwell = cpu_to_le16(params->dwell[band].active);
-               chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
-               chan->iteration_count = cpu_to_le16(1);
-               chan++;
-       }
-}
-
-/*
- * Fill in probe request with the following parameters:
- * TA is our vif HW address, which mac80211 ensures we have.
- * Packet is broadcasted, so this is both SA and DA.
- * The probe request IE is made out of two: first comes the most prioritized
- * SSID if a directed scan is requested. Second comes whatever extra
- * information was given to us as the scan request IE.
- */
-static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
-                                 int n_ssids, const u8 *ssid, int ssid_len,
-                                 const u8 *band_ie, int band_ie_len,
-                                 const u8 *common_ie, int common_ie_len,
-                                 int left)
-{
-       int len = 0;
-       u8 *pos = NULL;
-
-       /* Make sure there is enough space for the probe request,
-        * two mandatory IEs and the data */
-       left -= 24;
-       if (left < 0)
-               return 0;
-
-       frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-       eth_broadcast_addr(frame->da);
-       memcpy(frame->sa, ta, ETH_ALEN);
-       eth_broadcast_addr(frame->bssid);
-       frame->seq_ctrl = 0;
-
-       len += 24;
-
-       /* for passive scans, no need to fill anything */
-       if (n_ssids == 0)
-               return (u16)len;
-
-       /* points to the payload of the request */
-       pos = &frame->u.probe_req.variable[0];
-
-       /* fill in our SSID IE */
-       left -= ssid_len + 2;
-       if (left < 0)
-               return 0;
-       *pos++ = WLAN_EID_SSID;
-       *pos++ = ssid_len;
-       if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
-               memcpy(pos, ssid, ssid_len);
-               pos += ssid_len;
-       }
-
-       len += ssid_len + 2;
-
-       if (WARN_ON(left < band_ie_len + common_ie_len))
-               return len;
-
-       if (band_ie && band_ie_len) {
-               memcpy(pos, band_ie, band_ie_len);
-               pos += band_ie_len;
-               len += band_ie_len;
-       }
-
-       if (common_ie && common_ie_len) {
-               memcpy(pos, common_ie, common_ie_len);
-               pos += common_ie_len;
-               len += common_ie_len;
-       }
-
-       return (u16)len;
-}
-
 static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
                                            struct ieee80211_vif *vif)
 {
@@ -325,7 +231,7 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                         * If there is more than one active interface make
                         * passive scan more fragmented.
                         */
-                       frag_passive_dwell = (global_cnt < 2) ? 40 : 20;
+                       frag_passive_dwell = 40;
                        params->max_out_time = frag_passive_dwell;
                } else {
                        params->suspend_time = 120;
@@ -358,10 +264,10 @@ not_bound:
 
        for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
                if (params->passive_fragmented)
-                       params->dwell[band].passive = frag_passive_dwell;
-               else
-                       params->dwell[band].passive =
-                               iwl_mvm_get_passive_dwell(mvm, band);
+                       params->dwell[band].fragmented = frag_passive_dwell;
+
+               params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
+                                                                       band);
                params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
                                                                      n_ssids);
        }
@@ -379,20 +285,11 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
 {
        int max_probe_len;
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
-               max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
-       else
-               max_probe_len = mvm->fw->ucode_capa.max_probe_length;
+       max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
 
        /* we create the 802.11 header and SSID element */
        max_probe_len -= 24 + 2;
 
-       /* basic ssid is added only for hw_scan with and old api */
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID) &&
-           !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) &&
-           !is_sched_scan)
-               max_probe_len -= 32;
-
        /* DS parameter set element is added on 2.4GHZ band if required */
        if (iwl_mvm_rrm_scan_needed(mvm))
                max_probe_len -= 3;
@@ -404,9 +301,6 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
 {
        int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN))
-               return max_ie_len;
-
        /* TODO: [BUG] This function should return the maximum allowed size of
         * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
         * in the same command. So the correct implementation of this function
@@ -420,129 +314,6 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
        return max_ie_len;
 }
 
-int iwl_mvm_scan_request(struct iwl_mvm *mvm,
-                        struct ieee80211_vif *vif,
-                        struct cfg80211_scan_request *req)
-{
-       struct iwl_host_cmd hcmd = {
-               .id = SCAN_REQUEST_CMD,
-               .len = { 0, },
-               .data = { mvm->scan_cmd, },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       struct iwl_scan_cmd *cmd = mvm->scan_cmd;
-       int ret;
-       u32 status;
-       int ssid_len = 0;
-       u8 *ssid = NULL;
-       bool basic_ssid = !(mvm->fw->ucode_capa.flags &
-                          IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
-       struct iwl_mvm_scan_params params = {};
-
-       lockdep_assert_held(&mvm->mutex);
-
-       /* we should have failed registration if scan_cmd was NULL */
-       if (WARN_ON(mvm->scan_cmd == NULL))
-               return -ENOMEM;
-
-       IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
-       mvm->scan_status = IWL_MVM_SCAN_OS;
-       memset(cmd, 0, ksize(cmd));
-
-       cmd->channel_count = (u8)req->n_channels;
-       cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
-       cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
-       cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
-
-       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags, &params);
-       cmd->max_out_time = cpu_to_le32(params.max_out_time);
-       cmd->suspend_time = cpu_to_le32(params.suspend_time);
-       if (params.passive_fragmented)
-               cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
-
-       cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
-       cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
-                                       MAC_FILTER_IN_BEACON);
-
-       if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
-               cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED);
-       else
-               cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
-
-       cmd->repeats = cpu_to_le32(1);
-
-       /*
-        * If the user asked for passive scan, don't change to active scan if
-        * you see any activity on the channel - remain passive.
-        */
-       if (req->n_ssids > 0) {
-               cmd->passive2active = cpu_to_le16(1);
-               cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
-               if (basic_ssid) {
-                       ssid = req->ssids[0].ssid;
-                       ssid_len = req->ssids[0].ssid_len;
-               }
-       } else {
-               cmd->passive2active = 0;
-               cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
-       }
-
-       iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->ssids, req->n_ssids,
-                               basic_ssid ? 1 : 0);
-
-       cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
-                                          3 << TX_CMD_FLG_BT_PRIO_POS);
-
-       cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
-       cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
-       cmd->tx_cmd.rate_n_flags =
-                       iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band,
-                                                 req->no_cck);
-
-       cmd->tx_cmd.len =
-               cpu_to_le16(iwl_mvm_fill_probe_req(
-                           (struct ieee80211_mgmt *)cmd->data,
-                           vif->addr,
-                           req->n_ssids, ssid, ssid_len,
-                           req->ie, req->ie_len, NULL, 0,
-                           mvm->fw->ucode_capa.max_probe_length));
-
-       iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);
-
-       cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
-               le16_to_cpu(cmd->tx_cmd.len) +
-               (cmd->channel_count * sizeof(struct iwl_scan_channel)));
-       hcmd.len[0] = le16_to_cpu(cmd->len);
-
-       status = SCAN_RESPONSE_OK;
-       ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
-       if (!ret && status == SCAN_RESPONSE_OK) {
-               IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
-       } else {
-               /*
-                * If the scan failed, it usually means that the FW was unable
-                * to allocate the time events. Warn on it, but maybe we
-                * should try to send the command again with different params.
-                */
-               IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n",
-                       status, ret);
-               mvm->scan_status = IWL_MVM_SCAN_NONE;
-               ret = -EIO;
-       }
-       return ret;
-}
-
-int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_cmd_response *resp = (void *)pkt->data;
-
-       IWL_DEBUG_SCAN(mvm, "Scan response received. status 0x%x\n",
-                      le32_to_cpu(resp->status));
-       return 0;
-}
-
 int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
                                                struct iwl_rx_cmd_buffer *rxb,
                                                struct iwl_device_cmd *cmd)
@@ -556,130 +327,25 @@ int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
        return 0;
 }
 
-int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_scan_complete_notif *notif = (void *)pkt->data;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
-                      notif->status, notif->scanned_channels);
-
-       if (mvm->scan_status == IWL_MVM_SCAN_OS)
-               mvm->scan_status = IWL_MVM_SCAN_NONE;
-       ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
-
-       iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-
-       return 0;
-}
-
 int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
                                    struct iwl_rx_cmd_buffer *rxb,
                                    struct iwl_device_cmd *cmd)
 {
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-       if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) &&
-           !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
-               struct iwl_sched_scan_results *notif = (void *)pkt->data;
-
-               if (!(notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN))
-                       return 0;
-       }
-
        IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
        ieee80211_sched_scan_results(mvm->hw);
 
        return 0;
 }
 
-static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
-                                    struct iwl_rx_packet *pkt, void *data)
-{
-       struct iwl_mvm *mvm =
-               container_of(notif_wait, struct iwl_mvm, notif_wait);
-       struct iwl_scan_complete_notif *notif;
-       u32 *resp;
-
-       switch (pkt->hdr.cmd) {
-       case SCAN_ABORT_CMD:
-               resp = (void *)pkt->data;
-               if (*resp == CAN_ABORT_STATUS) {
-                       IWL_DEBUG_SCAN(mvm,
-                                      "Scan can be aborted, wait until completion\n");
-                       return false;
-               }
-
-               /*
-                * If scan cannot be aborted, it means that we had a
-                * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
-                * ieee80211_scan_completed already.
-                */
-               IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
-                              *resp);
-               return true;
-
-       case SCAN_COMPLETE_NOTIFICATION:
-               notif = (void *)pkt->data;
-               IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n",
-                              notif->status);
-               return true;
-
-       default:
-               WARN_ON(1);
-               return false;
-       };
-}
-
-static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm)
-{
-       struct iwl_notification_wait wait_scan_abort;
-       static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
-                                              SCAN_COMPLETE_NOTIFICATION };
-       int ret;
-
-       iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
-                                  scan_abort_notif,
-                                  ARRAY_SIZE(scan_abort_notif),
-                                  iwl_mvm_scan_abort_notif, NULL);
-
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
-       if (ret) {
-               IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
-               /* mac80211's state will be cleaned in the nic_restart flow */
-               goto out_remove_notif;
-       }
-
-       return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);
-
-out_remove_notif:
-       iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
-       return ret;
-}
-
 int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
                                           struct iwl_rx_cmd_buffer *rxb,
                                           struct iwl_device_cmd *cmd)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u8 status, ebs_status;
+       struct iwl_periodic_scan_complete *scan_notif;
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
-               struct iwl_periodic_scan_complete *scan_notif;
+       scan_notif = (void *)pkt->data;
 
-               scan_notif = (void *)pkt->data;
-               status = scan_notif->status;
-               ebs_status = scan_notif->ebs_status;
-       } else  {
-               struct iwl_scan_offload_complete *scan_notif;
-
-               scan_notif = (void *)pkt->data;
-               status = scan_notif->status;
-               ebs_status = scan_notif->ebs_status;
-       }
        /* scan status must be locked for proper checking */
        lockdep_assert_held(&mvm->mutex);
 
@@ -687,9 +353,9 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
                       "%s completed, status %s, EBS status %s\n",
                       mvm->scan_status == IWL_MVM_SCAN_SCHED ?
                                "Scheduled scan" : "Scan",
-                      status == IWL_SCAN_OFFLOAD_COMPLETED ?
+                      scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
                                "completed" : "aborted",
-                      ebs_status == IWL_SCAN_EBS_SUCCESS ?
+                      scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
                                "success" : "failed");
 
 
@@ -700,64 +366,16 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
        } else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
                mvm->scan_status = IWL_MVM_SCAN_NONE;
                ieee80211_scan_completed(mvm->hw,
-                                        status == IWL_SCAN_OFFLOAD_ABORTED);
+                               scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
        }
 
-       if (ebs_status)
+       if (scan_notif->ebs_status)
                mvm->last_ebs_successful = false;
 
        return 0;
 }
 
-static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
-                                         struct ieee80211_vif *vif,
-                                         struct ieee80211_scan_ies *ies,
-                                         enum ieee80211_band band,
-                                         struct iwl_tx_cmd *cmd,
-                                         u8 *data)
-{
-       u16 cmd_len;
-
-       cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
-       cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
-       cmd->sta_id = mvm->aux_sta.sta_id;
-
-       cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);
-
-       cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
-                                        vif->addr,
-                                        1, NULL, 0,
-                                        ies->ies[band], ies->len[band],
-                                        ies->common_ies, ies->common_ie_len,
-                                        SCAN_OFFLOAD_PROBE_REQ_SIZE);
-       cmd->len = cpu_to_le16(cmd_len);
-}
-
-static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
-                              struct ieee80211_vif *vif,
-                              struct cfg80211_sched_scan_request *req,
-                              struct iwl_scan_offload_cmd *scan,
-                              struct iwl_mvm_scan_params *params)
-{
-       scan->channel_count = req->n_channels;
-       scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
-       scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
-       scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
-       scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
-
-       scan->max_out_time = cpu_to_le32(params->max_out_time);
-       scan->suspend_time = cpu_to_le32(params->suspend_time);
-
-       scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
-                                         MAC_FILTER_IN_BEACON);
-       scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
-       scan->rep_count = cpu_to_le32(1);
-
-       if (params->passive_fragmented)
-               scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
-}
-
 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
 {
        int i;
@@ -815,127 +433,6 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
        }
 }
 
-static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
-                                 struct cfg80211_sched_scan_request *req,
-                                 u8 *channels_buffer,
-                                 enum ieee80211_band band,
-                                 int *head,
-                                 u32 ssid_bitmap,
-                                 struct iwl_mvm_scan_params *params)
-{
-       u32 n_channels = mvm->fw->ucode_capa.n_scan_channels;
-       __le32 *type = (__le32 *)channels_buffer;
-       __le16 *channel_number = (__le16 *)(type + n_channels);
-       __le16 *iter_count = channel_number + n_channels;
-       __le32 *iter_interval = (__le32 *)(iter_count + n_channels);
-       u8 *active_dwell = (u8 *)(iter_interval + n_channels);
-       u8 *passive_dwell = active_dwell + n_channels;
-       int i, index = 0;
-
-       for (i = 0; i < req->n_channels; i++) {
-               struct ieee80211_channel *chan = req->channels[i];
-
-               if (chan->band != band)
-                       continue;
-
-               index = *head;
-               (*head)++;
-
-               channel_number[index] = cpu_to_le16(chan->hw_value);
-               active_dwell[index] = params->dwell[band].active;
-               passive_dwell[index] = params->dwell[band].passive;
-
-               iter_count[index] = cpu_to_le16(1);
-               iter_interval[index] = 0;
-
-               if (!(chan->flags & IEEE80211_CHAN_NO_IR))
-                       type[index] |=
-                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
-
-               type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
-                                          IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
-
-               if (chan->flags & IEEE80211_CHAN_NO_HT40)
-                       type[index] |=
-                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
-
-               /* scan for all SSIDs from req->ssids */
-               type[index] |= cpu_to_le32(ssid_bitmap);
-       }
-}
-
-int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
-                             struct ieee80211_vif *vif,
-                             struct cfg80211_sched_scan_request *req,
-                             struct ieee80211_scan_ies *ies)
-{
-       int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
-       int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
-       int head = 0;
-       u32 ssid_bitmap;
-       int cmd_len;
-       int ret;
-       u8 *probes;
-       bool basic_ssid = !(mvm->fw->ucode_capa.flags &
-                           IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
-
-       struct iwl_scan_offload_cfg *scan_cfg;
-       struct iwl_host_cmd cmd = {
-               .id = SCAN_OFFLOAD_CONFIG_CMD,
-       };
-       struct iwl_mvm_scan_params params = {};
-
-       lockdep_assert_held(&mvm->mutex);
-
-       cmd_len = sizeof(struct iwl_scan_offload_cfg) +
-                 mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE +
-                 2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
-
-       scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
-       if (!scan_cfg)
-               return -ENOMEM;
-
-       probes = scan_cfg->data +
-               mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE;
-
-       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
-       iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
-       scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
-
-       iwl_scan_offload_build_ssid(req, scan_cfg->scan_cmd.direct_scan,
-                                   &ssid_bitmap, basic_ssid);
-       /* build tx frames for supported bands */
-       if (band_2ghz) {
-               iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
-                                             IEEE80211_BAND_2GHZ,
-                                             &scan_cfg->scan_cmd.tx_cmd[0],
-                                             probes);
-               iwl_build_channel_cfg(mvm, req, scan_cfg->data,
-                                     IEEE80211_BAND_2GHZ, &head,
-                                     ssid_bitmap, &params);
-       }
-       if (band_5ghz) {
-               iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
-                                             IEEE80211_BAND_5GHZ,
-                                             &scan_cfg->scan_cmd.tx_cmd[1],
-                                             probes +
-                                               SCAN_OFFLOAD_PROBE_REQ_SIZE);
-               iwl_build_channel_cfg(mvm, req, scan_cfg->data,
-                                     IEEE80211_BAND_5GHZ, &head,
-                                     ssid_bitmap, &params);
-       }
-
-       cmd.data[0] = scan_cfg;
-       cmd.len[0] = cmd_len;
-       cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
-
-       IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-       kfree(scan_cfg);
-       return ret;
-}
-
 int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
                                       struct cfg80211_sched_scan_request *req)
 {
@@ -1018,33 +515,6 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
        return true;
 }
 
-int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
-                            struct cfg80211_sched_scan_request *req)
-{
-       struct iwl_scan_offload_req scan_req = {
-               .watchdog = IWL_SCHED_SCAN_WATCHDOG,
-
-               .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
-               .schedule_line[0].delay = cpu_to_le16(req->interval / 1000),
-               .schedule_line[0].full_scan_mul = 1,
-
-               .schedule_line[1].iterations = 0xff,
-               .schedule_line[1].delay = cpu_to_le16(req->interval / 1000),
-               .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
-       };
-
-       if (iwl_mvm_scan_pass_all(mvm, req))
-               scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
-
-       if (mvm->last_ebs_successful &&
-           mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
-               scan_req.flags |=
-                       cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
-
-       return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
-                                   sizeof(scan_req), &scan_req);
-}
-
 int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
                               struct ieee80211_vif *vif,
                               struct cfg80211_sched_scan_request *req,
@@ -1057,21 +527,12 @@ int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
                if (ret)
                        return ret;
                ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
-       } else if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
-               mvm->scan_status = IWL_MVM_SCAN_SCHED;
-               ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
-               if (ret)
-                       return ret;
-               ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
        } else {
                mvm->scan_status = IWL_MVM_SCAN_SCHED;
-               ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
-               if (ret)
-                       return ret;
                ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
                if (ret)
                        return ret;
-               ret = iwl_mvm_sched_scan_start(mvm, req);
+               ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
        }
 
        return ret;
@@ -1088,9 +549,7 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
        /* Exit instantly with error when device is not ready
         * to receive scan abort command or it does not perform
         * scheduled scan currently */
-       if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
-           (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
-            mvm->scan_status != IWL_MVM_SCAN_OS))
+       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
                return -EIO;
 
        ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
@@ -1133,13 +592,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
                goto out;
        }
 
-       if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
-           (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
-            mvm->scan_status != IWL_MVM_SCAN_OS)) {
-               IWL_DEBUG_SCAN(mvm, "No scan to stop\n");
-               return 0;
-       }
-
        iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
                                   scan_done_notif,
                                   ARRAY_SIZE(scan_done_notif),
@@ -1316,7 +768,7 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
        cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
        if (params->passive_fragmented)
                cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].passive;
+                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
        cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
@@ -1579,9 +1031,7 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
                return 0;
        }
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
-               return iwl_mvm_scan_offload_stop(mvm, true);
-       return iwl_mvm_cancel_regular_scan(mvm);
+       return iwl_mvm_scan_offload_stop(mvm, true);
 }
 
 /* UMAC scan API */
@@ -1764,7 +1214,7 @@ iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
        cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
        if (params->passive_fragmented)
                cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].passive;
+                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
        cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
@@ -2158,14 +1608,8 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
                                mvm->fw->ucode_capa.n_scan_channels +
                        sizeof(struct iwl_scan_req_umac_tail);
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
-               return sizeof(struct iwl_scan_req_unified_lmac) +
-                       sizeof(struct iwl_scan_channel_cfg_lmac) *
-                               mvm->fw->ucode_capa.n_scan_channels +
-                       sizeof(struct iwl_scan_probe_req);
-
-       return sizeof(struct iwl_scan_cmd) +
-               mvm->fw->ucode_capa.max_probe_length +
-                       mvm->fw->ucode_capa.n_scan_channels *
-               sizeof(struct iwl_scan_channel);
+       return sizeof(struct iwl_scan_req_unified_lmac) +
+               sizeof(struct iwl_scan_channel_cfg_lmac) *
+               mvm->fw->ucode_capa.n_scan_channels +
+               sizeof(struct iwl_scan_probe_req);
 }
index 7eb78e2c240a6e7778810869b9c31dc9b1ee2850..b0f59fdd287c787cbb0f65ca319aebfadb850d31 100644 (file)
@@ -99,7 +99,35 @@ static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
 
 /*
  * Aging and idle timeouts for the different possible scenarios
- * in SF_FULL_ON state.
+ * in default configuration
+ */
+static const
+__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+       {
+               cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF),
+               cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
+       },
+       {
+               cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF),
+               cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF)
+       },
+       {
+               cpu_to_le32(SF_MCAST_AGING_TIMER_DEF),
+               cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF)
+       },
+       {
+               cpu_to_le32(SF_BA_AGING_TIMER_DEF),
+               cpu_to_le32(SF_BA_IDLE_TIMER_DEF)
+       },
+       {
+               cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF),
+               cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF)
+       },
+};
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in single BSS MAC configuration.
  */
 static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
        {
@@ -124,7 +152,8 @@ static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
        },
 };
 
-static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
+static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
+                                   struct iwl_sf_cfg_cmd *sf_cmd,
                                    struct ieee80211_sta *sta)
 {
        int i, j, watermark;
@@ -163,24 +192,38 @@ static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
                                        cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
                }
        }
-       BUILD_BUG_ON(sizeof(sf_full_timeout) !=
-                    sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
 
-       memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
-              sizeof(sf_full_timeout));
+       if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
+               BUILD_BUG_ON(sizeof(sf_full_timeout) !=
+                            sizeof(__le32) * SF_NUM_SCENARIO *
+                            SF_NUM_TIMEOUT_TYPES);
+
+               memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
+                      sizeof(sf_full_timeout));
+       } else {
+               BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
+                            sizeof(__le32) * SF_NUM_SCENARIO *
+                            SF_NUM_TIMEOUT_TYPES);
+
+               memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def,
+                      sizeof(sf_full_timeout_def));
+       }
+
 }
 
 static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
                             enum iwl_sf_state new_state)
 {
        struct iwl_sf_cfg_cmd sf_cmd = {
-               .state = cpu_to_le32(new_state),
+               .state = cpu_to_le32(SF_FULL_ON),
        };
        struct ieee80211_sta *sta;
        int ret = 0;
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF &&
-           mvm->cfg->disable_dummy_notification)
+       if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
+               sf_cmd.state = cpu_to_le32(new_state);
+
+       if (mvm->cfg->disable_dummy_notification)
                sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
 
        /*
@@ -192,6 +235,8 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
 
        switch (new_state) {
        case SF_UNINIT:
+               if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
+                       iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
                break;
        case SF_FULL_ON:
                if (sta_id == IWL_MVM_STATION_COUNT) {
@@ -206,11 +251,11 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
                        rcu_read_unlock();
                        return -EINVAL;
                }
-               iwl_mvm_fill_sf_command(&sf_cmd, sta);
+               iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
                rcu_read_unlock();
                break;
        case SF_INIT_OFF:
-               iwl_mvm_fill_sf_command(&sf_cmd, NULL);
+               iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
                break;
        default:
                WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
index 5c23cddaaae34ea26a4722f9dbdc192e3e218ab5..50f9288368af169fe4eb5c31b8316f46dfd35c97 100644 (file)
@@ -273,7 +273,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
        else
                sta_id = mvm_sta->sta_id;
 
-       if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
+       if (sta_id == IWL_MVM_STATION_COUNT)
                return -ENOSPC;
 
        spin_lock_init(&mvm_sta->lock);
@@ -1681,9 +1681,6 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
        };
        int ret;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_DISABLE_STA_TX))
-               return;
-
        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
index 4b81c0bf63b0a86173afde87ef2822c0dfa8860f..8d179ab67cc237026e8653d18e791d826d9ee0e4 100644 (file)
@@ -263,17 +263,23 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
                             "TE ended - current time %lu, estimated end %lu\n",
                             jiffies, te_data->end_jiffies);
 
-               if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+               switch (te_data->vif->type) {
+               case NL80211_IFTYPE_P2P_DEVICE:
                        ieee80211_remain_on_channel_expired(mvm->hw);
                        iwl_mvm_roc_finished(mvm);
+                       break;
+               case NL80211_IFTYPE_STATION:
+                       /*
+                        * By now, we should have finished association
+                        * and know the dtim period.
+                        */
+                       iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+                               "No association and the time event is over already...");
+                       break;
+               default:
+                       break;
                }
 
-               /*
-                * By now, we should have finished association
-                * and know the dtim period.
-                */
-               iwl_mvm_te_check_disconnect(mvm, te_data->vif,
-                       "No association and the time event is over already...");
                iwl_mvm_te_clear_data(mvm, te_data);
        } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
                te_data->running = true;
index 96a05406babf864fb0ff463e9b87dc4c27ba5299..ba34dda1ae36bc8ca562f2cbae7a443bf3bf1868 100644 (file)
@@ -664,6 +664,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                info->status.rates[0].count = tx_resp->failure_frame + 1;
                iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
                                            info);
+               info->status.status_driver_data[1] =
+                       (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
 
                /* Single frame failure in an AMPDU queue => send BAR */
                if (txq_id >= mvm->first_agg_queue &&
@@ -909,6 +911,8 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
        info->status.tx_time = tid_data->tx_time;
        info->status.status_driver_data[0] =
                (void *)(uintptr_t)tid_data->reduced_tpc;
+       info->status.status_driver_data[1] =
+               (void *)(uintptr_t)tid_data->rate_n_flags;
 }
 
 int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
index 8decf99532298dcb7fa3135421c441e5d6e98368..435faee0a28eee1d674d95f07fc934baef2e13b0 100644 (file)
@@ -332,7 +332,7 @@ static const char *desc_lookup(u32 num)
  * read with u32-sized accesses, any members with a different size
  * need to be ordered correctly though!
  */
-struct iwl_error_event_table {
+struct iwl_error_event_table_v1 {
        u32 valid;              /* (nonzero) valid, (0) log is empty */
        u32 error_id;           /* type of error */
        u32 pc;                 /* program counter */
@@ -377,7 +377,55 @@ struct iwl_error_event_table {
        u32 u_timestamp;        /* indicate when the date and time of the
                                 * compilation */
        u32 flow_handler;       /* FH read/write pointers, RX credit */
-} __packed;
+} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
+
+struct iwl_error_event_table {
+       u32 valid;              /* (nonzero) valid, (0) log is empty */
+       u32 error_id;           /* type of error */
+       u32 pc;                 /* program counter */
+       u32 blink1;             /* branch link */
+       u32 blink2;             /* branch link */
+       u32 ilink1;             /* interrupt link */
+       u32 ilink2;             /* interrupt link */
+       u32 data1;              /* error-specific data */
+       u32 data2;              /* error-specific data */
+       u32 data3;              /* error-specific data */
+       u32 bcon_time;          /* beacon timer */
+       u32 tsf_low;            /* network timestamp function timer */
+       u32 tsf_hi;             /* network timestamp function timer */
+       u32 gp1;                /* GP1 timer register */
+       u32 gp2;                /* GP2 timer register */
+       u32 gp3;                /* GP3 timer register */
+       u32 major;              /* uCode version major */
+       u32 minor;              /* uCode version minor */
+       u32 hw_ver;             /* HW Silicon version */
+       u32 brd_ver;            /* HW board version */
+       u32 log_pc;             /* log program counter */
+       u32 frame_ptr;          /* frame pointer */
+       u32 stack_ptr;          /* stack pointer */
+       u32 hcmd;               /* last host command header */
+       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
+                                * rxtx_flag */
+       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
+                                * host_flag */
+       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
+                                * enc_flag */
+       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
+                                * time_flag */
+       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
+                                * wico interrupt */
+       u32 isr_pref;           /* isr status register LMPM_NIC_PREF_STAT */
+       u32 wait_event;         /* wait event() caller address */
+       u32 l2p_control;        /* L2pControlField */
+       u32 l2p_duration;       /* L2pDurationField */
+       u32 l2p_mhvalid;        /* L2pMhValidBits */
+       u32 l2p_addr_match;     /* L2pAddrMatchStat */
+       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
+                                * (LMPM_PMG_SEL) */
+       u32 u_timestamp;        /* indicate when the date and time of the
+                                * compilation */
+       u32 flow_handler;       /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */;
 
 /*
  * UMAC error struct - relevant starting from family 8000 chip.
@@ -396,11 +444,11 @@ struct iwl_umac_error_event_table {
        u32 data1;              /* error-specific data */
        u32 data2;              /* error-specific data */
        u32 data3;              /* error-specific data */
-       u32 umac_fw_ver;        /* UMAC version */
-       u32 umac_fw_api_ver;    /* UMAC FW API ver */
+       u32 umac_major;
+       u32 umac_minor;
        u32 frame_pointer;      /* core register 27*/
        u32 stack_pointer;      /* core register 28 */
-       u32 cmd_header; /* latest host cmd sent to UMAC */
+       u32 cmd_header;         /* latest host cmd sent to UMAC */
        u32 nic_isr_pref;       /* ISR status register */
 } __packed;
 
@@ -441,18 +489,18 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
        IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
        IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
        IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
-       IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_fw_ver);
-       IWL_ERR(mvm, "0x%08X | umac api version\n", table.umac_fw_api_ver);
+       IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
+       IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
        IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
        IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
        IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
        IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
 }
 
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
 {
        struct iwl_trans *trans = mvm->trans;
-       struct iwl_error_event_table table;
+       struct iwl_error_event_table_v1 table;
        u32 base;
 
        base = mvm->error_event_table;
@@ -489,7 +537,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
                                      table.data1, table.data2, table.data3,
                                      table.blink1, table.blink2, table.ilink1,
                                      table.ilink2, table.bcon_time, table.gp1,
-                                     table.gp2, table.gp3, table.ucode_ver,
+                                     table.gp2, table.gp3, table.ucode_ver, 0,
                                      table.hw_ver, table.brd_ver);
        IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
                desc_lookup(table.error_id));
@@ -530,6 +578,92 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
                iwl_mvm_dump_umac_error_log(mvm);
 }
 
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+       struct iwl_trans *trans = mvm->trans;
+       struct iwl_error_event_table table;
+       u32 base;
+
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) {
+               iwl_mvm_dump_nic_error_log_old(mvm);
+               return;
+       }
+
+       base = mvm->error_event_table;
+       if (mvm->cur_ucode == IWL_UCODE_INIT) {
+               if (!base)
+                       base = mvm->fw->init_errlog_ptr;
+       } else {
+               if (!base)
+                       base = mvm->fw->inst_errlog_ptr;
+       }
+
+       if (base < 0x800000) {
+               IWL_ERR(mvm,
+                       "Not valid error log pointer 0x%08X for %s uCode\n",
+                       base,
+                       (mvm->cur_ucode == IWL_UCODE_INIT)
+                                       ? "Init" : "RT");
+               return;
+       }
+
+       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+               IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+                       mvm->status, table.valid);
+       }
+
+       /* Do not change this output - scripts rely on it */
+
+       IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
+       trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
+                                     table.data1, table.data2, table.data3,
+                                     table.blink1, table.blink2, table.ilink1,
+                                     table.ilink2, table.bcon_time, table.gp1,
+                                     table.gp2, table.gp3, table.major,
+                                     table.minor, table.hw_ver, table.brd_ver);
+       IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+               desc_lookup(table.error_id));
+       IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
+       IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
+       IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
+       IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
+       IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
+       IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
+       IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
+       IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
+       IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
+       IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
+       IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
+       IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
+       IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
+       IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
+       IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
+       IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
+       IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
+       IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
+       IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
+       IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
+       IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
+       IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
+       IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
+       IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
+       IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
+       IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
+       IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
+       IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
+       IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+       IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+       IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+       IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
+       IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+
+       if (mvm->support_umac_log)
+               iwl_mvm_dump_umac_error_log(mvm);
+}
 void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
                        const struct iwl_trans_txq_scd_cfg *cfg,
                        unsigned int wdg_timeout)
@@ -643,6 +777,40 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        ieee80211_request_smps(vif, smps_mode);
 }
 
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
+{
+       struct iwl_statistics_cmd scmd = {
+               .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
+       };
+       struct iwl_host_cmd cmd = {
+               .id = STATISTICS_CMD,
+               .len[0] = sizeof(scmd),
+               .data[0] = &scmd,
+               .flags = CMD_WANT_SKB,
+       };
+       int ret;
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret)
+               return ret;
+
+       iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
+       iwl_free_resp(&cmd);
+
+       if (clear)
+               iwl_mvm_accu_radio_stats(mvm);
+
+       return 0;
+}
+
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
+{
+       mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
+       mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
+       mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
+       mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
+}
+
 static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
                                   struct ieee80211_vif *vif)
 {
@@ -689,7 +857,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        mvmvif->low_latency = value;
 
-       res = iwl_mvm_update_quotas(mvm, NULL);
+       res = iwl_mvm_update_quotas(mvm, false, NULL);
        if (res)
                return res;
 
@@ -717,25 +885,6 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
        return result;
 }
 
-static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
-{
-       bool *idle = _data;
-
-       if (!vif->bss_conf.idle)
-               *idle = false;
-}
-
-bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
-{
-       bool idle = true;
-
-       ieee80211_iterate_active_interfaces_atomic(
-                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                       iwl_mvm_idle_iter, &idle);
-
-       return idle;
-}
-
 struct iwl_bss_iter_data {
        struct ieee80211_vif *vif;
        bool error;
index 686dd301cd536b68d616ea0507a3e1742e12240c..b1856973492237dcbee2ebf640ebeb0bf593fecb 100644 (file)
@@ -415,10 +415,35 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 
 /* 8000 Series */
        {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
        {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
        {0}
index cae0eb8835ceae9d2c11190041417482a5b6f436..01996c9d98a79b1d62e3a665cd0c720df79ad04e 100644 (file)
@@ -217,6 +217,8 @@ struct iwl_pcie_txq_scratch_buf {
  * @active: stores if queue is active
  * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
  * @wd_timeout: queue watchdog timeout (jiffies) - per queue
+ * @frozen: tx stuck queue timer is frozen
+ * @frozen_expiry_remainder: remember how long until the timer fires
  *
  * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
  * descriptors) and required locking structures.
@@ -228,9 +230,11 @@ struct iwl_txq {
        dma_addr_t scratchbufs_dma;
        struct iwl_pcie_txq_entry *entries;
        spinlock_t lock;
+       unsigned long frozen_expiry_remainder;
        struct timer_list stuck_timer;
        struct iwl_trans_pcie *trans_pcie;
        bool need_update;
+       bool frozen;
        u8 active;
        bool ampdu;
        unsigned long wd_timeout;
index 69935aa5a1b3c702ff147bc521b3621ae70a63b8..dc247325d8d7f9ebd11472bcf6c64e9a48d7bb14 100644 (file)
@@ -682,6 +682,43 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
        return ret;
 }
 
+/*
+ * Driver Takes the ownership on secure machine before FW load
+ * and prevent race with the BT load.
+ * W/A for ROM bug. (should be remove in the next Si step)
+ */
+static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
+{
+       u32 val, loop = 1000;
+
+       /* Check the RSA semaphore is accessible - if not, we are in trouble */
+       val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
+       if (val & (BIT(1) | BIT(17))) {
+               IWL_ERR(trans,
+                       "can't access the RSA semaphore it is write protected\n");
+               return 0;
+       }
+
+       /* take ownership on the AUX IF */
+       iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
+       iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
+
+       do {
+               iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
+               val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
+               if (val == 0x1) {
+                       iwl_write_prph(trans, RSA_ENABLE, 0);
+                       return 0;
+               }
+
+               udelay(10);
+               loop--;
+       } while (loop > 0);
+
+       IWL_ERR(trans, "Failed to take ownership on secure machine\n");
+       return -EIO;
+}
+
 static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
                                            const struct fw_img *image,
                                            int cpu,
@@ -898,6 +935,14 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
        IWL_DEBUG_FW(trans, "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");
 
+       if (trans->dbg_dest_tlv)
+               iwl_pcie_apply_destination(trans);
+
+       /* TODO: remove in the next Si step */
+       ret = iwl_pcie_rsa_race_bug_wa(trans);
+       if (ret)
+               return ret;
+
        /* configure the ucode to be ready to get the secured image */
        /* release CPU reset */
        iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
@@ -914,9 +959,6 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
        if (ret)
                return ret;
 
-       if (trans->dbg_dest_tlv)
-               iwl_pcie_apply_destination(trans);
-
        /* wait for image verification to complete  */
        ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
                                LMPM_SECURE_BOOT_STATUS_SUCCESS,
@@ -1462,6 +1504,60 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
        return ret;
 }
 
+static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
+                                           unsigned long txqs,
+                                           bool freeze)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int queue;
+
+       for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
+               struct iwl_txq *txq = &trans_pcie->txq[queue];
+               unsigned long now;
+
+               spin_lock_bh(&txq->lock);
+
+               now = jiffies;
+
+               if (txq->frozen == freeze)
+                       goto next_queue;
+
+               IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
+                                   freeze ? "Freezing" : "Waking", queue);
+
+               txq->frozen = freeze;
+
+               if (txq->q.read_ptr == txq->q.write_ptr)
+                       goto next_queue;
+
+               if (freeze) {
+                       if (unlikely(time_after(now,
+                                               txq->stuck_timer.expires))) {
+                               /*
+                                * The timer should have fired, maybe it is
+                                * spinning right now on the lock.
+                                */
+                               goto next_queue;
+                       }
+                       /* remember how long until the timer fires */
+                       txq->frozen_expiry_remainder =
+                               txq->stuck_timer.expires - now;
+                       del_timer(&txq->stuck_timer);
+                       goto next_queue;
+               }
+
+               /*
+                * Wake a non-empty queue -> arm timer with the
+                * remainder before it froze
+                */
+               mod_timer(&txq->stuck_timer,
+                         now + txq->frozen_expiry_remainder);
+
+next_queue:
+               spin_unlock_bh(&txq->lock);
+       }
+}
+
 #define IWL_FLUSH_WAIT_MS      2000
 
 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
@@ -1713,7 +1809,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
        int ret;
        size_t bufsz;
 
-       bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
+       bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
 
        if (!trans_pcie->txq)
                return -EAGAIN;
@@ -1726,11 +1822,11 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
-                               "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d%s\n",
+                               "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
                                cnt, q->read_ptr, q->write_ptr,
                                !!test_bit(cnt, trans_pcie->queue_used),
                                 !!test_bit(cnt, trans_pcie->queue_stopped),
-                                txq->need_update,
+                                txq->need_update, txq->frozen,
                                 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
        }
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
@@ -1961,24 +2057,25 @@ static const struct {
        { .start = 0x00a01c7c, .end = 0x00a01c7c },
        { .start = 0x00a01c28, .end = 0x00a01c54 },
        { .start = 0x00a01c5c, .end = 0x00a01c5c },
-       { .start = 0x00a01c84, .end = 0x00a01c84 },
+       { .start = 0x00a01c60, .end = 0x00a01cdc },
        { .start = 0x00a01ce0, .end = 0x00a01d0c },
        { .start = 0x00a01d18, .end = 0x00a01d20 },
        { .start = 0x00a01d2c, .end = 0x00a01d30 },
        { .start = 0x00a01d40, .end = 0x00a01d5c },
        { .start = 0x00a01d80, .end = 0x00a01d80 },
-       { .start = 0x00a01d98, .end = 0x00a01d98 },
+       { .start = 0x00a01d98, .end = 0x00a01d9c },
+       { .start = 0x00a01da8, .end = 0x00a01da8 },
+       { .start = 0x00a01db8, .end = 0x00a01df4 },
        { .start = 0x00a01dc0, .end = 0x00a01dfc },
        { .start = 0x00a01e00, .end = 0x00a01e2c },
        { .start = 0x00a01e40, .end = 0x00a01e60 },
+       { .start = 0x00a01e68, .end = 0x00a01e6c },
+       { .start = 0x00a01e74, .end = 0x00a01e74 },
        { .start = 0x00a01e84, .end = 0x00a01e90 },
        { .start = 0x00a01e9c, .end = 0x00a01ec4 },
-       { .start = 0x00a01ed0, .end = 0x00a01ed0 },
-       { .start = 0x00a01f00, .end = 0x00a01f14 },
-       { .start = 0x00a01f44, .end = 0x00a01f58 },
-       { .start = 0x00a01f80, .end = 0x00a01fa8 },
-       { .start = 0x00a01fb0, .end = 0x00a01fbc },
-       { .start = 0x00a01ff8, .end = 0x00a01ffc },
+       { .start = 0x00a01ed0, .end = 0x00a01ee0 },
+       { .start = 0x00a01f00, .end = 0x00a01f1c },
+       { .start = 0x00a01f44, .end = 0x00a01ffc },
        { .start = 0x00a02000, .end = 0x00a02048 },
        { .start = 0x00a02068, .end = 0x00a020f0 },
        { .start = 0x00a02100, .end = 0x00a02118 },
@@ -2305,6 +2402,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
        .dbgfs_register = iwl_trans_pcie_dbgfs_register,
 
        .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
+       .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
 
        .write8 = iwl_trans_pcie_write8,
        .write32 = iwl_trans_pcie_write32,
@@ -2423,10 +2521,45 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
         * "dash" value). To keep hw_rev backwards compatible - we'll store it
         * in the old format.
         */
-       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+               unsigned long flags;
+               int ret;
+
                trans->hw_rev = (trans->hw_rev & 0xfff0) |
                                (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
 
+               /*
+                * in-order to recognize C step driver should read chip version
+                * id located at the AUX bus MISC address space.
+                */
+               iwl_set_bit(trans, CSR_GP_CNTRL,
+                           CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+               udelay(2);
+
+               ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+                                  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                                  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                                  25000);
+               if (ret < 0) {
+                       IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
+                       goto out_pci_disable_msi;
+               }
+
+               if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+                       u32 hw_step;
+
+                       hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
+                       hw_step |= ENABLE_WFPM;
+                       __iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
+                       hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
+                       hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
+                       if (hw_step == 0x3)
+                               trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
+                                               (SILICON_C_STEP << 2);
+                       iwl_trans_release_nic_access(trans, &flags);
+               }
+       }
+
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
                 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
index af0bce736358dc2a6d12ed5c922f6349ce95f7b2..06952aadfd7b5d4dccfff9f9689cd804031ab0eb 100644 (file)
@@ -725,33 +725,50 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
        iwl_pcie_tx_start(trans, 0);
 }
 
+static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       unsigned long flags;
+       int ch, ret;
+       u32 mask = 0;
+
+       spin_lock(&trans_pcie->irq_lock);
+
+       if (!iwl_trans_grab_nic_access(trans, false, &flags))
+               goto out;
+
+       /* Stop each Tx DMA channel */
+       for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+               iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+               mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
+       }
+
+       /* Wait for DMA channels to be idle */
+       ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
+       if (ret < 0)
+               IWL_ERR(trans,
+                       "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
+                       ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
+
+       iwl_trans_release_nic_access(trans, &flags);
+
+out:
+       spin_unlock(&trans_pcie->irq_lock);
+}
+
 /*
  * iwl_pcie_tx_stop - Stop all Tx DMA channels
  */
 int iwl_pcie_tx_stop(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       int ch, txq_id, ret;
+       int txq_id;
 
        /* Turn off all Tx DMA fifos */
-       spin_lock(&trans_pcie->irq_lock);
-
        iwl_scd_deactivate_fifos(trans);
 
-       /* Stop each Tx DMA channel, and wait for it to be idle */
-       for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
-               iwl_write_direct32(trans,
-                                  FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
-               ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
-                       FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
-               if (ret < 0)
-                       IWL_ERR(trans,
-                               "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
-                               ch,
-                               iwl_read_direct32(trans,
-                                                 FH_TSSR_TX_STATUS_REG));
-       }
-       spin_unlock(&trans_pcie->irq_lock);
+       /* Turn off all Tx DMA channels */
+       iwl_pcie_tx_stop_fh(trans);
 
        /*
         * This function can be called before the op_mode disabled the
@@ -912,9 +929,18 @@ error:
 
 static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
 {
+       lockdep_assert_held(&txq->lock);
+
        if (!txq->wd_timeout)
                return;
 
+       /*
+        * station is asleep and we send data - that must
+        * be uAPSD or PS-Poll. Don't rearm the timer.
+        */
+       if (txq->frozen)
+               return;
+
        /*
         * if empty delete timer, otherwise move timer forward
         * since we're making progress on this queue
@@ -1248,6 +1274,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
                        SCD_TX_STTS_QUEUE_OFFSET(txq_id);
        static const u32 zero_val[4] = {};
 
+       trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
+       trans_pcie->txq[txq_id].frozen = false;
+
        /*
         * Upon HW Rfkill - we stop the device, and then stop the queues
         * in the op_mode. Just for the sake of the simplicity of the op_mode,
index a92985a6ea21f0628166fb4b66ce6b0e5915ca5c..1a4d558022d8c1dff78b1181c6f9bc79e7573a12 100644 (file)
@@ -1356,8 +1356,8 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
 
        /* Find the BSS we want using available scan results */
        bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
-               sme->ssid, sme->ssid_len,
-               WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+               sme->ssid, sme->ssid_len, IEEE80211_BSS_TYPE_ESS,
+               IEEE80211_PRIVACY_ANY);
        if (!bss) {
                wiphy_err(wiphy, "assoc: bss %pM not in scan results\n",
                          sme->bssid);
@@ -2000,7 +2000,7 @@ static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev,
         * bss list is populated already */
        bss = cfg80211_get_bss(wiphy, params->chandef.chan, params->bssid,
                params->ssid, params->ssid_len,
-               WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+               IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY_ANY);
 
        if (bss) {
                ret = lbs_ibss_join_existing(priv, params, bss);
index cc6a0a586f0b748c054c4c0e8631ea0d706501cb..26cbf1dcc6620f0502314daf8cac579794c8f6e4 100644 (file)
@@ -742,8 +742,7 @@ void lbs_debugfs_init(void)
 
 void lbs_debugfs_remove(void)
 {
-       if (lbs_dir)
-                debugfs_remove(lbs_dir);
+       debugfs_remove(lbs_dir);
 }
 
 void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
index 569b64ecc6075f1fa028091b7b319c20fbda01c9..8079560f496581600cb658c4fa09d8e5d1faed81 100644 (file)
@@ -667,7 +667,7 @@ static int lbs_setup_firmware(struct lbs_private *priv)
        lbs_deb_enter(LBS_DEB_FW);
 
        /* Read MAC address from firmware */
-       memset(priv->current_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->current_addr);
        ret = lbs_update_hw_spec(priv);
        if (ret)
                goto done;
@@ -871,7 +871,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
 
        lbs_deb_enter(LBS_DEB_MAIN);
 
-       memset(priv->current_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->current_addr);
 
        priv->connect_status = LBS_DISCONNECTED;
        priv->channel = DEFAULT_AD_HOC_CHANNEL;
index d576dd6665d38d182053a6a7622ff1ddd0b41926..1a20cee5febea93aa350e1ff401e88e04aa2c4b7 100644 (file)
@@ -365,7 +365,6 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(if_usb_reset_device);
 
 /**
  *  usb_tx_block - transfer data to the device
@@ -907,7 +906,6 @@ restart:
        lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
        return ret;
 }
-EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
 
 
 #define if_usb_suspend NULL
index 25c5acc78bd141c218499eae06d1ed1c33f70e83..ed02e4bf2c26f5cc333d88b89347b89e577779bb 100644 (file)
@@ -152,7 +152,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
        /*
         * Read priv address from HW
         */
-       memset(priv->current_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->current_addr);
        ret = lbtf_update_hw_spec(priv);
        if (ret) {
                ret = -1;
@@ -199,7 +199,7 @@ out:
 static int lbtf_init_adapter(struct lbtf_private *priv)
 {
        lbtf_deb_enter(LBTF_DEB_MAIN);
-       memset(priv->current_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->current_addr);
        mutex_init(&priv->lock);
 
        priv->vif = NULL;
index 8908be6dbc48233db9183247e1928bfab6eaa491..d5c0a1af08b997367289c341149fb527e3e1edd6 100644 (file)
@@ -330,6 +330,83 @@ static const struct ieee80211_rate hwsim_rates[] = {
        { .bitrate = 540 }
 };
 
+#define OUI_QCA 0x001374
+#define QCA_NL80211_SUBCMD_TEST 1
+enum qca_nl80211_vendor_subcmds {
+       QCA_WLAN_VENDOR_ATTR_TEST = 8,
+       QCA_WLAN_VENDOR_ATTR_MAX = QCA_WLAN_VENDOR_ATTR_TEST
+};
+
+static const struct nla_policy
+hwsim_vendor_test_policy[QCA_WLAN_VENDOR_ATTR_MAX + 1] = {
+       [QCA_WLAN_VENDOR_ATTR_MAX] = { .type = NLA_U32 },
+};
+
+static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy,
+                                         struct wireless_dev *wdev,
+                                         const void *data, int data_len)
+{
+       struct sk_buff *skb;
+       struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_MAX + 1];
+       int err;
+       u32 val;
+
+       err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len,
+                       hwsim_vendor_test_policy);
+       if (err)
+               return err;
+       if (!tb[QCA_WLAN_VENDOR_ATTR_TEST])
+               return -EINVAL;
+       val = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_TEST]);
+       wiphy_debug(wiphy, "%s: test=%u\n", __func__, val);
+
+       /* Send a vendor event as a test. Note that this would not normally be
+        * done within a command handler, but rather, based on some other
+        * trigger. For simplicity, this command is used to trigger the event
+        * here.
+        *
+        * event_idx = 0 (index in mac80211_hwsim_vendor_commands)
+        */
+       skb = cfg80211_vendor_event_alloc(wiphy, wdev, 100, 0, GFP_KERNEL);
+       if (skb) {
+               /* skb_put() or nla_put() will fill up data within
+                * NL80211_ATTR_VENDOR_DATA.
+                */
+
+               /* Add vendor data */
+               nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1);
+
+               /* Send the event - this will call nla_nest_end() */
+               cfg80211_vendor_event(skb, GFP_KERNEL);
+       }
+
+       /* Send a response to the command */
+       skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 10);
+       if (!skb)
+               return -ENOMEM;
+
+       /* skb_put() or nla_put() will fill up data within
+        * NL80211_ATTR_VENDOR_DATA
+        */
+       nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 2);
+
+       return cfg80211_vendor_cmd_reply(skb);
+}
+
+static struct wiphy_vendor_command mac80211_hwsim_vendor_commands[] = {
+       {
+               .info = { .vendor_id = OUI_QCA,
+                         .subcmd = QCA_NL80211_SUBCMD_TEST },
+               .flags = WIPHY_VENDOR_CMD_NEED_NETDEV,
+               .doit = mac80211_hwsim_vendor_cmd_test,
+       }
+};
+
+/* Advertise support vendor specific events */
+static const struct nl80211_vendor_cmd_info mac80211_hwsim_vendor_events[] = {
+       { .vendor_id = OUI_QCA, .subcmd = 1 },
+};
+
 static const struct ieee80211_iface_limit hwsim_if_limits[] = {
        { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
        { .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) |
@@ -906,8 +983,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                goto nla_put_failure;
        }
 
-       if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
-                   ETH_ALEN, data->addresses[1].addr))
+       if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, hdr->addr2))
                goto nla_put_failure;
 
        /* We get the skb->data */
@@ -1522,21 +1598,16 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
                vp->aid = info->aid;
        }
 
-       if (changed & BSS_CHANGED_BEACON_INT) {
-               wiphy_debug(hw->wiphy, "  BCNINT: %d\n", info->beacon_int);
-               data->beacon_int = info->beacon_int * 1024;
-       }
-
        if (changed & BSS_CHANGED_BEACON_ENABLED) {
-               wiphy_debug(hw->wiphy, "  BCN EN: %d\n", info->enable_beacon);
+               wiphy_debug(hw->wiphy, "  BCN EN: %d (BI=%u)\n",
+                           info->enable_beacon, info->beacon_int);
                vp->bcn_en = info->enable_beacon;
                if (data->started &&
                    !hrtimer_is_queued(&data->beacon_timer.timer) &&
                    info->enable_beacon) {
                        u64 tsf, until_tbtt;
                        u32 bcn_int;
-                       if (WARN_ON(!data->beacon_int))
-                               data->beacon_int = 1000 * 1024;
+                       data->beacon_int = info->beacon_int * 1024;
                        tsf = mac80211_hwsim_get_tsf(hw, vif);
                        bcn_int = data->beacon_int;
                        until_tbtt = bcn_int - do_div(tsf, bcn_int);
@@ -1550,8 +1621,10 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
                                mac80211_hwsim_bcn_en_iter, &count);
                        wiphy_debug(hw->wiphy, "  beaconing vifs remaining: %u",
                                    count);
-                       if (count == 0)
+                       if (count == 0) {
                                tasklet_hrtimer_cancel(&data->beacon_timer);
+                               data->beacon_int = 0;
+                       }
                }
        }
 
@@ -1911,7 +1984,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw,
 
        printk(KERN_DEBUG "hwsim sw_scan_complete\n");
        hwsim->scanning = false;
-       memset(hwsim->scan_addr, 0, ETH_ALEN);
+       eth_zero_addr(hwsim->scan_addr);
 
        mutex_unlock(&hwsim->mutex);
 }
@@ -2267,7 +2340,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        skb_queue_head_init(&data->pending);
 
        SET_IEEE80211_DEV(hw, data->dev);
-       memset(addr, 0, ETH_ALEN);
+       eth_zero_addr(addr);
        addr[0] = 0x02;
        addr[3] = idx >> 8;
        addr[4] = idx;
@@ -2420,6 +2493,12 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        hw->max_rates = 4;
        hw->max_rate_tries = 11;
 
+       hw->wiphy->vendor_commands = mac80211_hwsim_vendor_commands;
+       hw->wiphy->n_vendor_commands =
+               ARRAY_SIZE(mac80211_hwsim_vendor_commands);
+       hw->wiphy->vendor_events = mac80211_hwsim_vendor_events;
+       hw->wiphy->n_vendor_events = ARRAY_SIZE(mac80211_hwsim_vendor_events);
+
        if (param->reg_strict)
                hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
        if (param->regd) {
@@ -2600,7 +2679,7 @@ static void hwsim_mon_setup(struct net_device *dev)
        ether_setup(dev);
        dev->tx_queue_len = 0;
        dev->type = ARPHRD_IEEE80211_RADIOTAP;
-       memset(dev->dev_addr, 0, ETH_ALEN);
+       eth_zero_addr(dev->dev_addr);
        dev->dev_addr[0] = 0x12;
 }
 
@@ -2611,7 +2690,7 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
 
        spin_lock_bh(&hwsim_radio_lock);
        list_for_each_entry(data, &hwsim_radios, list) {
-               if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
+               if (mac80211_hwsim_addr_match(data, addr)) {
                        _found = true;
                        break;
                }
index 543148d27b01cb659dfc65dfe5c3a977c9b15b9d..433bd6837c79042b3a5e7daa2bb20f594fd7dd28 100644 (file)
@@ -159,6 +159,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
        int tid;
        struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
        struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
+       struct mwifiex_ra_list_tbl *ra_list;
        u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
 
        add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
@@ -166,7 +167,13 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
 
        tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
               >> BLOCKACKPARAM_TID_POS;
+       ra_list = mwifiex_wmm_get_ralist_node(priv, tid, add_ba_rsp->
+               peer_mac_addr);
        if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
+               if (ra_list) {
+                       ra_list->ba_status = BA_SETUP_NONE;
+                       ra_list->amsdu_in_ampdu = false;
+               }
                mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr,
                                   TYPE_DELBA_SENT, true);
                if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
@@ -185,6 +192,10 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
                        tx_ba_tbl->amsdu = true;
                else
                        tx_ba_tbl->amsdu = false;
+               if (ra_list) {
+                       ra_list->amsdu_in_ampdu = tx_ba_tbl->amsdu;
+                       ra_list->ba_status = BA_SETUP_COMPLETE;
+               }
        } else {
                dev_err(priv->adapter->dev, "BA stream not created\n");
        }
@@ -515,6 +526,7 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
                           enum mwifiex_ba_status ba_status)
 {
        struct mwifiex_tx_ba_stream_tbl *new_node;
+       struct mwifiex_ra_list_tbl *ra_list;
        unsigned long flags;
 
        if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
@@ -522,7 +534,11 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
                                   GFP_ATOMIC);
                if (!new_node)
                        return;
-
+               ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra);
+               if (ra_list) {
+                       ra_list->ba_status = ba_status;
+                       ra_list->amsdu_in_ampdu = false;
+               }
                INIT_LIST_HEAD(&new_node->list);
 
                new_node->tid = tid;
index 8e2e39422ad80edaa303c7a5877651d3ceb92a9f..afdd58aa90deda02793601bbf534e6eb0ad9180d 100644 (file)
@@ -77,22 +77,6 @@ mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
        return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
 }
 
-/* This function checks whether AMSDU is allowed for BA stream. */
-static inline u8
-mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv,
-                                 struct mwifiex_ra_list_tbl *ptr, int tid)
-{
-       struct mwifiex_tx_ba_stream_tbl *tx_tbl;
-
-       if (is_broadcast_ether_addr(ptr->ra))
-               return false;
-       tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
-       if (tx_tbl)
-               return tx_tbl->amsdu;
-
-       return false;
-}
-
 /* This function checks whether AMPDU is allowed or not for a particular TID. */
 static inline u8
 mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
@@ -181,22 +165,6 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
        return ret;
 }
 
-/*
- * This function checks whether BA stream is set up or not.
- */
-static inline int
-mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
-                          struct mwifiex_ra_list_tbl *ptr, int tid)
-{
-       struct mwifiex_tx_ba_stream_tbl *tx_tbl;
-
-       tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
-       if (tx_tbl && IS_BASTREAM_SETUP(tx_tbl))
-               return true;
-
-       return false;
-}
-
 /*
  * This function checks whether associated station is 11n enabled
  */
index 9b983b5cebbdf0dbda7ff51596df1e13de759a09..6183e255e62ac380a614cb593cb226d27da5909d 100644 (file)
@@ -170,7 +170,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        struct mwifiex_adapter *adapter = priv->adapter;
        struct sk_buff *skb_aggr, *skb_src;
        struct mwifiex_txinfo *tx_info_aggr, *tx_info_src;
-       int pad = 0, ret;
+       int pad = 0, aggr_num = 0, ret;
        struct mwifiex_tx_param tx_param;
        struct txpd *ptx_pd = NULL;
        struct timeval tv;
@@ -184,7 +184,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        }
 
        tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
-       skb_aggr = dev_alloc_skb(adapter->tx_buf_size);
+       skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
+                                              GFP_ATOMIC | GFP_DMA);
        if (!skb_aggr) {
                dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
@@ -200,6 +201,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 
        if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
                tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+       tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT;
        skb_aggr->priority = skb_src->priority;
 
        do_gettimeofday(&tv);
@@ -211,11 +213,9 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
                        break;
 
                skb_src = skb_dequeue(&pra_list->skb_head);
-
                pra_list->total_pkt_count--;
-
                atomic_dec(&priv->wmm.tx_pkts_queued);
-
+               aggr_num++;
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
                mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
@@ -251,6 +251,12 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
                ptx_pd = (struct txpd *)skb_aggr->data;
 
        skb_push(skb_aggr, headroom);
+       tx_info_aggr->aggr_num = aggr_num * 2;
+       if (adapter->data_sent || adapter->tx_lock_flag) {
+               atomic_add(aggr_num * 2, &adapter->tx_queued);
+               skb_queue_tail(&adapter->tx_data_q, skb_aggr);
+               return 0;
+       }
 
        if (adapter->iface_type == MWIFIEX_USB) {
                adapter->data_sent = true;
index a2e8817b56d8d317a59e40f7901f2052384ecef4..f75f8acfaca0332cef494e8146d4e331db683385 100644 (file)
@@ -659,6 +659,7 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
 {
        struct mwifiex_rx_reorder_tbl *tbl;
        struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
+       struct mwifiex_ra_list_tbl *ra_list;
        u8 cleanup_rx_reorder_tbl;
        unsigned long flags;
 
@@ -686,7 +687,11 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
                                "event: TID, RA not found in table\n");
                        return;
                }
-
+               ra_list = mwifiex_wmm_get_ralist_node(priv, tid, peer_mac);
+               if (ra_list) {
+                       ra_list->amsdu_in_ampdu = false;
+                       ra_list->ba_status = BA_SETUP_NONE;
+               }
                spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
                mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
                spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
index 41c8e25df9544021278a0998c6b89fa3469d4c98..bf9020ff2d33cf1cf9dc2c044f06a403b655e593 100644 (file)
@@ -717,6 +717,9 @@ mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
 
 static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
 {
+       struct mwifiex_adapter *adapter = priv->adapter;
+       unsigned long flags;
+
        priv->mgmt_frame_mask = 0;
        if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
                             HostCmd_ACT_GEN_SET, 0,
@@ -727,6 +730,25 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
        }
 
        mwifiex_deauthenticate(priv, NULL);
+
+       spin_lock_irqsave(&adapter->main_proc_lock, flags);
+       adapter->main_locked = true;
+       if (adapter->mwifiex_processing) {
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+               flush_workqueue(adapter->workqueue);
+       } else {
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+       }
+
+       spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+       adapter->rx_locked = true;
+       if (adapter->rx_processing) {
+               spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+               flush_workqueue(adapter->rx_workqueue);
+       } else {
+       spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+       }
+
        mwifiex_free_priv(priv);
        priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
        priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -740,6 +762,9 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
                             struct net_device *dev,
                             enum nl80211_iftype type)
 {
+       struct mwifiex_adapter *adapter = priv->adapter;
+       unsigned long flags;
+
        mwifiex_init_priv(priv);
 
        priv->bss_mode = type;
@@ -770,6 +795,14 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
                return -EOPNOTSUPP;
        }
 
+       spin_lock_irqsave(&adapter->main_proc_lock, flags);
+       adapter->main_locked = false;
+       spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+
+       spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+       adapter->rx_locked = false;
+       spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+
        return 0;
 }
 
@@ -1563,7 +1596,7 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
 
        wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac);
 
-       memset(deauth_mac, 0, ETH_ALEN);
+       eth_zero_addr(deauth_mac);
 
        spin_lock_irqsave(&priv->sta_list_spinlock, flags);
        sta_node = mwifiex_get_sta_entry(priv, params->mac);
@@ -1786,7 +1819,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
        wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
                " reason code %d\n", priv->cfg_bssid, reason_code);
 
-       memset(priv->cfg_bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->cfg_bssid);
        priv->hs2_enabled = false;
 
        return 0;
@@ -1954,13 +1987,13 @@ done:
                if (mode == NL80211_IFTYPE_ADHOC)
                        bss = cfg80211_get_bss(priv->wdev.wiphy, channel,
                                               bssid, ssid, ssid_len,
-                                              WLAN_CAPABILITY_IBSS,
-                                              WLAN_CAPABILITY_IBSS);
+                                              IEEE80211_BSS_TYPE_IBSS,
+                                              IEEE80211_PRIVACY_ANY);
                else
                        bss = cfg80211_get_bss(priv->wdev.wiphy, channel,
                                               bssid, ssid, ssid_len,
-                                              WLAN_CAPABILITY_ESS,
-                                              WLAN_CAPABILITY_ESS);
+                                              IEEE80211_BSS_TYPE_ESS,
+                                              IEEE80211_PRIVACY_ANY);
 
                if (!bss) {
                        if (is_scanning_required) {
@@ -2046,7 +2079,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
                dev_dbg(priv->adapter->dev,
                        "info: association to bssid %pM failed\n",
                        priv->cfg_bssid);
-               memset(priv->cfg_bssid, 0, ETH_ALEN);
+               eth_zero_addr(priv->cfg_bssid);
 
                if (ret > 0)
                        cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
@@ -2194,7 +2227,7 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
        if (mwifiex_deauthenticate(priv, NULL))
                return -EFAULT;
 
-       memset(priv->cfg_bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->cfg_bssid);
 
        return 0;
 }
@@ -2397,12 +2430,12 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
        ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
 }
 
-#define MWIFIEX_MAX_WQ_LEN  30
 /*
- *  create a new virtual interface with the given name
+ *  create a new virtual interface with the given name and name assign type
  */
 struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                                              const char *name,
+                                             unsigned char name_assign_type,
                                              enum nl80211_iftype type,
                                              u32 *flags,
                                              struct vif_params *params)
@@ -2411,7 +2444,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        struct mwifiex_private *priv;
        struct net_device *dev;
        void *mdev_priv;
-       char dfs_cac_str[MWIFIEX_MAX_WQ_LEN], dfs_chsw_str[MWIFIEX_MAX_WQ_LEN];
 
        if (!adapter)
                return ERR_PTR(-EFAULT);
@@ -2523,7 +2555,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        }
 
        dev = alloc_netdev_mqs(sizeof(struct mwifiex_private *), name,
-                              NET_NAME_UNKNOWN, ether_setup,
+                              name_assign_type, ether_setup,
                               IEEE80211_NUM_ACS, 1);
        if (!dev) {
                wiphy_err(wiphy, "no memory available for netdevice\n");
@@ -2576,12 +2608,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                return ERR_PTR(-EFAULT);
        }
 
-       strcpy(dfs_cac_str, "MWIFIEX_DFS_CAC");
-       strcat(dfs_cac_str, name);
-       priv->dfs_cac_workqueue = alloc_workqueue(dfs_cac_str,
+       priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC%s",
                                                  WQ_HIGHPRI |
                                                  WQ_MEM_RECLAIM |
-                                                 WQ_UNBOUND, 1);
+                                                 WQ_UNBOUND, 1, name);
        if (!priv->dfs_cac_workqueue) {
                wiphy_err(wiphy, "cannot register virtual network device\n");
                free_netdev(dev);
@@ -2594,11 +2624,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
        INIT_DELAYED_WORK(&priv->dfs_cac_work, mwifiex_dfs_cac_work_queue);
 
-       strcpy(dfs_chsw_str, "MWIFIEX_DFS_CHSW");
-       strcat(dfs_chsw_str, name);
-       priv->dfs_chan_sw_workqueue = alloc_workqueue(dfs_chsw_str,
+       priv->dfs_chan_sw_workqueue = alloc_workqueue("MWIFIEX_DFS_CHSW%s",
                                                      WQ_HIGHPRI | WQ_UNBOUND |
-                                                     WQ_MEM_RECLAIM, 1);
+                                                     WQ_MEM_RECLAIM, 1, name);
        if (!priv->dfs_chan_sw_workqueue) {
                wiphy_err(wiphy, "cannot register virtual network device\n");
                free_netdev(dev);
@@ -2738,24 +2766,71 @@ mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq,
 }
 
 #ifdef CONFIG_PM
-static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
-                                 struct cfg80211_wowlan *wowlan)
+static void mwifiex_set_auto_arp_mef_entry(struct mwifiex_private *priv,
+                                          struct mwifiex_mef_entry *mef_entry)
+{
+       int i, filt_num = 0, num_ipv4 = 0;
+       struct in_device *in_dev;
+       struct in_ifaddr *ifa;
+       __be32 ips[MWIFIEX_MAX_SUPPORTED_IPADDR];
+       struct mwifiex_adapter *adapter = priv->adapter;
+
+       mef_entry->mode = MEF_MODE_HOST_SLEEP;
+       mef_entry->action = MEF_ACTION_AUTO_ARP;
+
+       /* Enable ARP offload feature */
+       memset(ips, 0, sizeof(ips));
+       for (i = 0; i < MWIFIEX_MAX_BSS_NUM; i++) {
+               if (adapter->priv[i]->netdev) {
+                       in_dev = __in_dev_get_rtnl(adapter->priv[i]->netdev);
+                       if (!in_dev)
+                               continue;
+                       ifa = in_dev->ifa_list;
+                       if (!ifa || !ifa->ifa_local)
+                               continue;
+                       ips[i] = ifa->ifa_local;
+                       num_ipv4++;
+               }
+       }
+
+       for (i = 0; i < num_ipv4; i++) {
+               if (!ips[i])
+                       continue;
+               mef_entry->filter[filt_num].repeat = 1;
+               memcpy(mef_entry->filter[filt_num].byte_seq,
+                      (u8 *)&ips[i], sizeof(ips[i]));
+               mef_entry->filter[filt_num].
+                       byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
+                       sizeof(ips[i]);
+               mef_entry->filter[filt_num].offset = 46;
+               mef_entry->filter[filt_num].filt_type = TYPE_EQ;
+               if (filt_num) {
+                       mef_entry->filter[filt_num].filt_action =
+                               TYPE_OR;
+               }
+               filt_num++;
+       }
+
+       mef_entry->filter[filt_num].repeat = 1;
+       mef_entry->filter[filt_num].byte_seq[0] = 0x08;
+       mef_entry->filter[filt_num].byte_seq[1] = 0x06;
+       mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] = 2;
+       mef_entry->filter[filt_num].offset = 20;
+       mef_entry->filter[filt_num].filt_type = TYPE_EQ;
+       mef_entry->filter[filt_num].filt_action = TYPE_AND;
+}
+
+static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
+                                       struct mwifiex_ds_mef_cfg *mef_cfg,
+                                       struct mwifiex_mef_entry *mef_entry,
+                                       struct cfg80211_wowlan *wowlan)
 {
        int i, filt_num = 0, ret = 0;
        bool first_pat = true;
        u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
        const u8 ipv4_mc_mac[] = {0x33, 0x33};
        const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
-       struct mwifiex_ds_mef_cfg mef_cfg;
-       struct mwifiex_mef_entry *mef_entry;
 
-       mef_entry = kzalloc(sizeof(*mef_entry), GFP_KERNEL);
-       if (!mef_entry)
-               return -ENOMEM;
-
-       memset(&mef_cfg, 0, sizeof(mef_cfg));
-       mef_cfg.num_entries = 1;
-       mef_cfg.mef_entry = mef_entry;
        mef_entry->mode = MEF_MODE_HOST_SLEEP;
        mef_entry->action = MEF_ACTION_ALLOW_AND_WAKEUP_HOST;
 
@@ -2772,20 +2847,19 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
                if (!wowlan->patterns[i].pkt_offset) {
                        if (!(byte_seq[0] & 0x01) &&
                            (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
-                               mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
+                               mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
                                continue;
                        } else if (is_broadcast_ether_addr(byte_seq)) {
-                               mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST;
+                               mef_cfg->criteria |= MWIFIEX_CRITERIA_BROADCAST;
                                continue;
                        } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
                                    (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 2)) ||
                                   (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
                                    (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 3))) {
-                               mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST;
+                               mef_cfg->criteria |= MWIFIEX_CRITERIA_MULTICAST;
                                continue;
                        }
                }
-
                mef_entry->filter[filt_num].repeat = 1;
                mef_entry->filter[filt_num].offset =
                        wowlan->patterns[i].pkt_offset;
@@ -2802,7 +2876,7 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
        }
 
        if (wowlan->magic_pkt) {
-               mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
+               mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
                mef_entry->filter[filt_num].repeat = 16;
                memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
                                ETH_ALEN);
@@ -2823,6 +2897,34 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
                mef_entry->filter[filt_num].filt_type = TYPE_EQ;
                mef_entry->filter[filt_num].filt_action = TYPE_OR;
        }
+       return ret;
+}
+
+static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
+                                 struct cfg80211_wowlan *wowlan)
+{
+       int ret = 0, num_entries = 1;
+       struct mwifiex_ds_mef_cfg mef_cfg;
+       struct mwifiex_mef_entry *mef_entry;
+
+       if (wowlan->n_patterns || wowlan->magic_pkt)
+               num_entries++;
+
+       mef_entry = kcalloc(num_entries, sizeof(*mef_entry), GFP_KERNEL);
+       if (!mef_entry)
+               return -ENOMEM;
+
+       memset(&mef_cfg, 0, sizeof(mef_cfg));
+       mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST |
+               MWIFIEX_CRITERIA_UNICAST;
+       mef_cfg.num_entries = num_entries;
+       mef_cfg.mef_entry = mef_entry;
+
+       mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]);
+
+       if (wowlan->n_patterns || wowlan->magic_pkt)
+               ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg,
+                                                  &mef_entry[1], wowlan);
 
        if (!mef_cfg.criteria)
                mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
@@ -2830,8 +2932,8 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
                        MWIFIEX_CRITERIA_MULTICAST;
 
        ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
-                       HostCmd_ACT_GEN_SET, 0, &mef_cfg, true);
-
+                       HostCmd_ACT_GEN_SET, 0,
+                       &mef_cfg, true);
        kfree(mef_entry);
        return ret;
 }
@@ -2841,27 +2943,33 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
 {
        struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
        struct mwifiex_ds_hs_cfg hs_cfg;
-       int ret = 0;
-       struct mwifiex_private *priv =
-                       mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+       int i, ret = 0;
+       struct mwifiex_private *priv;
+
+       for (i = 0; i < adapter->priv_num; i++) {
+               priv = adapter->priv[i];
+               mwifiex_abort_cac(priv);
+       }
+
+       mwifiex_cancel_all_pending_cmd(adapter);
 
        if (!wowlan) {
                dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
                return 0;
        }
 
+       priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
        if (!priv->media_connected) {
                dev_warn(adapter->dev,
                         "Can not configure WOWLAN in disconnected state\n");
                return 0;
        }
 
-       if (wowlan->n_patterns || wowlan->magic_pkt) {
-               ret = mwifiex_set_mef_filter(priv, wowlan);
-               if (ret) {
-                       dev_err(adapter->dev, "Failed to set MEF filter\n");
-                       return ret;
-               }
+       ret = mwifiex_set_mef_filter(priv, wowlan);
+       if (ret) {
+               dev_err(adapter->dev, "Failed to set MEF filter\n");
+               return ret;
        }
 
        if (wowlan->disconnect) {
index 88d0eade6bb128565def33ab001cdb01c864b040..38f24e0427d28b02a979eaec1ce066e648829048 100644 (file)
@@ -33,6 +33,7 @@
 #define MWIFIEX_MAX_BSS_NUM         (3)
 
 #define MWIFIEX_DMA_ALIGN_SZ       64
+#define MWIFIEX_RX_HEADROOM        64
 #define MAX_TXPD_SZ                32
 #define INTF_HDR_ALIGN              4
 
@@ -82,6 +83,7 @@
 #define MWIFIEX_BUF_FLAG_TDLS_PKT         BIT(2)
 #define MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS   BIT(3)
 #define MWIFIEX_BUF_FLAG_ACTION_TX_STATUS  BIT(4)
+#define MWIFIEX_BUF_FLAG_AGGR_PKT          BIT(5)
 
 #define MWIFIEX_BRIDGED_PKTS_THR_HIGH      1024
 #define MWIFIEX_BRIDGED_PKTS_THR_LOW        128
 
 #define MWIFIEX_A_BAND_START_FREQ      5000
 
+/* SDIO Aggr data packet special info */
+#define SDIO_MAX_AGGR_BUF_SIZE         (256 * 255)
+#define BLOCK_NUMBER_OFFSET            15
+#define SDIO_HEADER_OFFSET             28
+
 enum mwifiex_bss_type {
        MWIFIEX_BSS_TYPE_STA = 0,
        MWIFIEX_BSS_TYPE_UAP = 1,
@@ -167,10 +174,11 @@ struct mwifiex_wait_queue {
 };
 
 struct mwifiex_rxinfo {
+       struct sk_buff *parent;
        u8 bss_num;
        u8 bss_type;
-       struct sk_buff *parent;
        u8 use_count;
+       u8 buf_type;
 };
 
 struct mwifiex_txinfo {
@@ -178,6 +186,7 @@ struct mwifiex_txinfo {
        u8 flags;
        u8 bss_num;
        u8 bss_type;
+       u8 aggr_num;
        u32 pkt_len;
        u8 ack_frame_id;
        u64 cookie;
index df553e86a0ad3bea74fc4ccee87db7c6e3d77e53..59d8964dd0dcaaadc39d0c09f872fe46c5488c4d 100644 (file)
@@ -197,6 +197,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 
 #define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
 #define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
+#define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
 
 #define MWIFIEX_DEF_HT_CAP     (IEEE80211_HT_CAP_DSSSCCK40 | \
                                 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
@@ -353,6 +354,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG                         0x0112
 #define HostCmd_CMD_TDLS_OPER                         0x0122
+#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG               0x0223
 
 #define PROTOCOL_NO_SECURITY        0x01
 #define PROTOCOL_STATIC_WEP         0x02
@@ -523,9 +525,11 @@ enum P2P_MODES {
 #define TYPE_OR                                (MAX_OPERAND+5)
 #define MEF_MODE_HOST_SLEEP                    1
 #define MEF_ACTION_ALLOW_AND_WAKEUP_HOST       3
+#define MEF_ACTION_AUTO_ARP                    0x10
 #define MWIFIEX_CRITERIA_BROADCAST     BIT(0)
 #define MWIFIEX_CRITERIA_UNICAST       BIT(1)
 #define MWIFIEX_CRITERIA_MULTICAST     BIT(3)
+#define MWIFIEX_MAX_SUPPORTED_IPADDR              4
 
 #define ACT_TDLS_DELETE            0x00
 #define ACT_TDLS_CREATE            0x01
@@ -1240,6 +1244,12 @@ struct host_cmd_ds_chan_rpt_event {
        u8 tlvbuf[0];
 } __packed;
 
+struct host_cmd_sdio_sp_rx_aggr_cfg {
+       u8 action;
+       u8 enable;
+       __le16 block_size;
+} __packed;
+
 struct mwifiex_fixed_bcn_param {
        __le64 timestamp;
        __le16 beacon_period;
@@ -1962,6 +1972,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_coalesce_cfg coalesce_cfg;
                struct host_cmd_ds_tdls_oper tdls_oper;
                struct host_cmd_ds_chan_rpt_req chan_rpt_req;
+               struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
        } params;
 } __packed;
 
index b77ba743e1c498c1bae30b11ae0b571937b971c8..e12192f5cfad306b8cd9d4e5fce7ec2bd2e67957 100644 (file)
@@ -76,7 +76,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
        u32 i;
 
        priv->media_connected = false;
-       memset(priv->curr_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->curr_addr);
 
        priv->pkt_tx_ctrl = 0;
        priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -266,18 +266,15 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
 
        mwifiex_wmm_init(adapter);
 
-       if (adapter->sleep_cfm) {
-               sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
-                                               adapter->sleep_cfm->data;
-               memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
-               sleep_cfm_buf->command =
-                               cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
-               sleep_cfm_buf->size =
-                               cpu_to_le16(adapter->sleep_cfm->len);
-               sleep_cfm_buf->result = 0;
-               sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM);
-               sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED);
-       }
+       sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
+                                       adapter->sleep_cfm->data;
+       memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
+       sleep_cfm_buf->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
+       sleep_cfm_buf->size = cpu_to_le16(adapter->sleep_cfm->len);
+       sleep_cfm_buf->result = 0;
+       sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM);
+       sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED);
+
        memset(&adapter->sleep_params, 0, sizeof(adapter->sleep_params));
        memset(&adapter->sleep_period, 0, sizeof(adapter->sleep_period));
        adapter->tx_lock_flag = false;
@@ -296,10 +293,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
        adapter->arp_filter_size = 0;
        adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
-       adapter->ext_scan = false;
        adapter->key_api_major_ver = 0;
        adapter->key_api_minor_ver = 0;
-       memset(adapter->perm_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(adapter->perm_addr);
        adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
        adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
        adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
@@ -482,6 +478,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
        spin_lock_init(&adapter->rx_proc_lock);
 
        skb_queue_head_init(&adapter->rx_data_q);
+       skb_queue_head_init(&adapter->tx_data_q);
 
        for (i = 0; i < adapter->priv_num; ++i) {
                INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head);
@@ -689,6 +686,10 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
                }
        }
 
+       atomic_set(&adapter->tx_queued, 0);
+       while ((skb = skb_dequeue(&adapter->tx_data_q)))
+               mwifiex_write_data_complete(adapter, skb, 0, 0);
+
        spin_lock_irqsave(&adapter->rx_proc_lock, flags);
 
        while ((skb = skb_dequeue(&adapter->rx_data_q))) {
index 7e74b4fccddd56e67eb8b28453553e4083eec895..03a95c7d34bf9ef1e12524d836c3adb3eab8e8fd 100644 (file)
@@ -131,10 +131,39 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
        return 0;
 }
 
+void mwifiex_queue_main_work(struct mwifiex_adapter *adapter)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->main_proc_lock, flags);
+       if (adapter->mwifiex_processing) {
+               adapter->more_task_flag = true;
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+       } else {
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+               queue_work(adapter->workqueue, &adapter->main_work);
+       }
+}
+EXPORT_SYMBOL_GPL(mwifiex_queue_main_work);
+
+static void mwifiex_queue_rx_work(struct mwifiex_adapter *adapter)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+       if (adapter->rx_processing) {
+               spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+       } else {
+               spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+               queue_work(adapter->rx_workqueue, &adapter->rx_work);
+       }
+}
+
 static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
 {
        unsigned long flags;
        struct sk_buff *skb;
+       struct mwifiex_rxinfo *rx_info;
 
        spin_lock_irqsave(&adapter->rx_proc_lock, flags);
        if (adapter->rx_processing || adapter->rx_locked) {
@@ -154,9 +183,16 @@ static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
                        if (adapter->if_ops.submit_rem_rx_urbs)
                                adapter->if_ops.submit_rem_rx_urbs(adapter);
                        adapter->delay_main_work = false;
-                       queue_work(adapter->workqueue, &adapter->main_work);
+                       mwifiex_queue_main_work(adapter);
+               }
+               rx_info = MWIFIEX_SKB_RXCB(skb);
+               if (rx_info->buf_type == MWIFIEX_TYPE_AGGR_DATA) {
+                       if (adapter->if_ops.deaggr_pkt)
+                               adapter->if_ops.deaggr_pkt(adapter, skb);
+                       dev_kfree_skb_any(skb);
+               } else {
+                       mwifiex_handle_rx_packet(adapter, skb);
                }
-               mwifiex_handle_rx_packet(adapter, skb);
        }
        spin_lock_irqsave(&adapter->rx_proc_lock, flags);
        adapter->rx_processing = false;
@@ -189,15 +225,17 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
        spin_lock_irqsave(&adapter->main_proc_lock, flags);
 
        /* Check if already processing */
-       if (adapter->mwifiex_processing) {
+       if (adapter->mwifiex_processing || adapter->main_locked) {
+               adapter->more_task_flag = true;
                spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
                goto exit_main_proc;
        } else {
                adapter->mwifiex_processing = true;
-               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
        }
 process_start:
        do {
+               adapter->more_task_flag = false;
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
                if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
                    (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
                        break;
@@ -212,9 +250,7 @@ process_start:
                if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING &&
                    adapter->iface_type != MWIFIEX_USB) {
                        adapter->delay_main_work = true;
-                       if (!adapter->rx_processing)
-                               queue_work(adapter->rx_workqueue,
-                                          &adapter->rx_work);
+                       mwifiex_queue_rx_work(adapter);
                        break;
                }
 
@@ -227,24 +263,26 @@ process_start:
                }
 
                if (adapter->rx_work_enabled && adapter->data_received)
-                       queue_work(adapter->rx_workqueue, &adapter->rx_work);
+                       mwifiex_queue_rx_work(adapter);
 
                /* Need to wake up the card ? */
                if ((adapter->ps_state == PS_STATE_SLEEP) &&
                    (adapter->pm_wakeup_card_req &&
                     !adapter->pm_wakeup_fw_try) &&
                    (is_command_pending(adapter) ||
+                    !skb_queue_empty(&adapter->tx_data_q) ||
                     !mwifiex_wmm_lists_empty(adapter))) {
                        adapter->pm_wakeup_fw_try = true;
                        mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
                        adapter->if_ops.wakeup(adapter);
+                       spin_lock_irqsave(&adapter->main_proc_lock, flags);
                        continue;
                }
 
                if (IS_CARD_RX_RCVD(adapter)) {
                        adapter->data_received = false;
                        adapter->pm_wakeup_fw_try = false;
-                       del_timer_sync(&adapter->wakeup_timer);
+                       del_timer(&adapter->wakeup_timer);
                        if (adapter->ps_state == PS_STATE_SLEEP)
                                adapter->ps_state = PS_STATE_AWAKE;
                } else {
@@ -257,7 +295,8 @@ process_start:
 
                        if ((!adapter->scan_chan_gap_enabled &&
                             adapter->scan_processing) || adapter->data_sent ||
-                           mwifiex_wmm_lists_empty(adapter)) {
+                           (mwifiex_wmm_lists_empty(adapter) &&
+                            skb_queue_empty(&adapter->tx_data_q))) {
                                if (adapter->cmd_sent || adapter->curr_cmd ||
                                    (!is_command_pending(adapter)))
                                        break;
@@ -295,8 +334,10 @@ process_start:
                if ((adapter->ps_state == PS_STATE_SLEEP) ||
                    (adapter->ps_state == PS_STATE_PRE_SLEEP) ||
                    (adapter->ps_state == PS_STATE_SLEEP_CFM) ||
-                   adapter->tx_lock_flag)
+                   adapter->tx_lock_flag){
+                       spin_lock_irqsave(&adapter->main_proc_lock, flags);
                        continue;
+               }
 
                if (!adapter->cmd_sent && !adapter->curr_cmd) {
                        if (mwifiex_exec_next_cmd(adapter) == -1) {
@@ -305,6 +346,20 @@ process_start:
                        }
                }
 
+               if ((adapter->scan_chan_gap_enabled ||
+                    !adapter->scan_processing) &&
+                   !adapter->data_sent &&
+                   !skb_queue_empty(&adapter->tx_data_q)) {
+                       mwifiex_process_tx_queue(adapter);
+                       if (adapter->hs_activated) {
+                               adapter->is_hs_configured = false;
+                               mwifiex_hs_activated_event
+                                       (mwifiex_get_priv
+                                       (adapter, MWIFIEX_BSS_ROLE_ANY),
+                                       false);
+                       }
+               }
+
                if ((adapter->scan_chan_gap_enabled ||
                     !adapter->scan_processing) &&
                    !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
@@ -320,7 +375,8 @@ process_start:
 
                if (adapter->delay_null_pkt && !adapter->cmd_sent &&
                    !adapter->curr_cmd && !is_command_pending(adapter) &&
-                   mwifiex_wmm_lists_empty(adapter)) {
+                   (mwifiex_wmm_lists_empty(adapter) &&
+                    skb_queue_empty(&adapter->tx_data_q))) {
                        if (!mwifiex_send_null_packet
                            (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
                             MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
@@ -330,15 +386,12 @@ process_start:
                        }
                        break;
                }
+               spin_lock_irqsave(&adapter->main_proc_lock, flags);
        } while (true);
 
        spin_lock_irqsave(&adapter->main_proc_lock, flags);
-       if (!adapter->delay_main_work &&
-           (adapter->int_status || IS_CARD_RX_RCVD(adapter))) {
-               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+       if (adapter->more_task_flag)
                goto process_start;
-       }
-
        adapter->mwifiex_processing = false;
        spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 
@@ -466,7 +519,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 
        rtnl_lock();
        /* Create station interface by default */
-       wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d",
+       wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM,
                                        NL80211_IFTYPE_STATION, NULL, NULL);
        if (IS_ERR(wdev)) {
                dev_err(adapter->dev, "cannot create default STA interface\n");
@@ -475,7 +528,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
        }
 
        if (driver_mode & MWIFIEX_DRIVER_MODE_UAP) {
-               wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d",
+               wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM,
                                                NL80211_IFTYPE_AP, NULL, NULL);
                if (IS_ERR(wdev)) {
                        dev_err(adapter->dev, "cannot create AP interface\n");
@@ -485,7 +538,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
        }
 
        if (driver_mode & MWIFIEX_DRIVER_MODE_P2P) {
-               wdev = mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
+               wdev = mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d", NET_NAME_ENUM,
                                                NL80211_IFTYPE_P2P_CLIENT, NULL,
                                                NULL);
                if (IS_ERR(wdev)) {
@@ -604,7 +657,7 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
        atomic_inc(&priv->adapter->tx_pending);
        mwifiex_wmm_add_buf_txqueue(priv, skb);
 
-       queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
+       mwifiex_queue_main_work(priv->adapter);
 
        return 0;
 }
@@ -1096,9 +1149,6 @@ mwifiex_add_card(void *card, struct semaphore *sem,
                INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue);
        }
 
-       if (adapter->if_ops.iface_work)
-               INIT_WORK(&adapter->iface_work, adapter->if_ops.iface_work);
-
        /* Register the device. Fill up the private data structure with relevant
           information from the card. */
        if (adapter->if_ops.register_dev(adapter)) {
index f0a6af179af03ba2ba2497c6a296f5f450b18463..fe1256044a6c9bca9b9e8475cf8a0487ca7cd0b8 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/ctype.h>
 #include <linux/of.h>
 #include <linux/idr.h>
+#include <linux/inetdevice.h>
 
 #include "decl.h"
 #include "ioctl.h"
@@ -58,6 +59,8 @@ enum {
 
 #define MWIFIEX_MAX_AP                         64
 
+#define MWIFIEX_MAX_PKTS_TXQ                   16
+
 #define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT       (5 * HZ)
 
 #define MWIFIEX_TIMER_10S                      10000
@@ -118,6 +121,7 @@ enum {
 
 #define MWIFIEX_TYPE_CMD                               1
 #define MWIFIEX_TYPE_DATA                              0
+#define MWIFIEX_TYPE_AGGR_DATA                         10
 #define MWIFIEX_TYPE_EVENT                             3
 
 #define MAX_BITMAP_RATES_SIZE                  18
@@ -140,6 +144,9 @@ enum {
 
 #define MWIFIEX_DRV_INFO_SIZE_MAX 0x40000
 
+/* Address alignment */
+#define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1))
+
 struct mwifiex_dbg {
        u32 num_cmd_host_to_card_failure;
        u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -207,6 +214,12 @@ struct mwifiex_tx_aggr {
        u8 amsdu;
 };
 
+enum mwifiex_ba_status {
+       BA_SETUP_NONE = 0,
+       BA_SETUP_INPROGRESS,
+       BA_SETUP_COMPLETE
+};
+
 struct mwifiex_ra_list_tbl {
        struct list_head list;
        struct sk_buff_head skb_head;
@@ -215,6 +228,8 @@ struct mwifiex_ra_list_tbl {
        u16 max_amsdu;
        u16 ba_pkt_count;
        u8 ba_packet_thr;
+       enum mwifiex_ba_status ba_status;
+       u8 amsdu_in_ampdu;
        u16 total_pkt_count;
        bool tdls_link;
 };
@@ -598,11 +613,6 @@ struct mwifiex_private {
        struct mwifiex_11h_intf_state state_11h;
 };
 
-enum mwifiex_ba_status {
-       BA_SETUP_NONE = 0,
-       BA_SETUP_INPROGRESS,
-       BA_SETUP_COMPLETE
-};
 
 struct mwifiex_tx_ba_stream_tbl {
        struct list_head list;
@@ -735,6 +745,7 @@ struct mwifiex_if_ops {
        int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
        void (*iface_work)(struct work_struct *work);
        void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
+       void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *);
 };
 
 struct mwifiex_adapter {
@@ -768,14 +779,18 @@ struct mwifiex_adapter {
        bool rx_processing;
        bool delay_main_work;
        bool rx_locked;
+       bool main_locked;
        struct mwifiex_bss_prio_tbl bss_prio_tbl[MWIFIEX_MAX_BSS_NUM];
        /* spin lock for init/shutdown */
        spinlock_t mwifiex_lock;
        /* spin lock for main process */
        spinlock_t main_proc_lock;
        u32 mwifiex_processing;
+       u8 more_task_flag;
        u16 tx_buf_size;
        u16 curr_tx_buf_size;
+       bool sdio_rx_aggr_enable;
+       u16 sdio_rx_block_size;
        u32 ioport;
        enum MWIFIEX_HARDWARE_STATUS hw_status;
        u16 number_of_antenna;
@@ -810,6 +825,8 @@ struct mwifiex_adapter {
        spinlock_t scan_pending_q_lock;
        /* spin lock for RX processing routine */
        spinlock_t rx_proc_lock;
+       struct sk_buff_head tx_data_q;
+       atomic_t tx_queued;
        u32 scan_processing;
        u16 region_code;
        struct mwifiex_802_11d_domain_reg domain_reg;
@@ -881,8 +898,6 @@ struct mwifiex_adapter {
        bool ext_scan;
        u8 fw_api_ver;
        u8 key_api_major_ver, key_api_minor_ver;
-       struct work_struct iface_work;
-       unsigned long iface_work_flags;
        struct memory_type_mapping *mem_type_mapping_tbl;
        u8 num_mem_types;
        u8 curr_mem_idx;
@@ -896,6 +911,8 @@ struct mwifiex_adapter {
        bool auto_tdls;
 };
 
+void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
+
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
 
 void mwifiex_set_trans_start(struct net_device *dev);
@@ -1318,6 +1335,7 @@ u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type);
 
 struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                                              const char *name,
+                                             unsigned char name_assign_type,
                                              enum nl80211_iftype type,
                                              u32 *flags,
                                              struct vif_params *params);
@@ -1417,6 +1435,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
                            u8 rx_rate, u8 ht_info);
 
 void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
+void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
+void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
 
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
index a5828da5936534595825137fd733c7f66dcc0bd6..bcc7751d883c3773b558ef8c8b8bb8c3a8a3c474 100644 (file)
@@ -203,7 +203,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
                card->pcie.reg = data->reg;
                card->pcie.blksz_fw_dl = data->blksz_fw_dl;
                card->pcie.tx_buf_size = data->tx_buf_size;
-               card->pcie.supports_fw_dump = data->supports_fw_dump;
+               card->pcie.can_dump_fw = data->can_dump_fw;
                card->pcie.can_ext_scan = data->can_ext_scan;
        }
 
@@ -234,8 +234,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        if (!adapter || !adapter->priv_num)
                return;
 
-       cancel_work_sync(&adapter->iface_work);
-
        if (user_rmmod) {
 #ifdef CONFIG_PM_SLEEP
                if (adapter->is_suspended)
@@ -498,7 +496,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
 
        for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
                /* Allocate skb here so that firmware can DMA data from it */
-               skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+               skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
+                                                 GFP_KERNEL | GFP_DMA);
                if (!skb) {
                        dev_err(adapter->dev,
                                "Unable to allocate skb for RX ring.\n");
@@ -1297,7 +1296,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                        }
                }
 
-               skb_tmp = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+               skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
+                                                     GFP_KERNEL | GFP_DMA);
                if (!skb_tmp) {
                        dev_err(adapter->dev,
                                "Unable to allocate skb.\n");
@@ -2099,7 +2099,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
                goto exit;
 
        mwifiex_interrupt_status(adapter);
-       queue_work(adapter->workqueue, &adapter->main_work);
+       mwifiex_queue_main_work(adapter);
 
 exit:
        return IRQ_HANDLED;
@@ -2271,7 +2271,7 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
        int ret;
        static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL };
 
-       if (!card->pcie.supports_fw_dump)
+       if (!card->pcie.can_dump_fw)
                return;
 
        for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
@@ -2371,25 +2371,26 @@ done:
        adapter->curr_mem_idx = 0;
 }
 
+static unsigned long iface_work_flags;
+static struct mwifiex_adapter *save_adapter;
 static void mwifiex_pcie_work(struct work_struct *work)
 {
-       struct mwifiex_adapter *adapter =
-                       container_of(work, struct mwifiex_adapter, iface_work);
-
        if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
-                              &adapter->iface_work_flags))
-               mwifiex_pcie_fw_dump_work(adapter);
+                              &iface_work_flags))
+               mwifiex_pcie_fw_dump_work(save_adapter);
 }
 
+static DECLARE_WORK(pcie_work, mwifiex_pcie_work);
 /* This function dumps FW information */
 static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
 {
-       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags))
+       save_adapter = adapter;
+       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
                return;
 
-       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags);
+       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
 
-       schedule_work(&adapter->iface_work);
+       schedule_work(&pcie_work);
 }
 
 /*
@@ -2617,7 +2618,6 @@ static struct mwifiex_if_ops pcie_ops = {
        .init_fw_port =                 mwifiex_pcie_init_fw_port,
        .clean_pcie_ring =              mwifiex_clean_pcie_ring_buf,
        .fw_dump =                      mwifiex_pcie_fw_dump,
-       .iface_work =                   mwifiex_pcie_work,
 };
 
 /*
@@ -2663,6 +2663,7 @@ static void mwifiex_pcie_cleanup_module(void)
        /* Set the flag as user is removing this module. */
        user_rmmod = 1;
 
+       cancel_work_sync(&pcie_work);
        pci_unregister_driver(&mwifiex_pcie);
 }
 
index 666d40e9dbc36495238537af92345d37e537c173..0e7ee8b72358f7feba632f43349113a6e662b210 100644 (file)
@@ -205,7 +205,7 @@ struct mwifiex_pcie_device {
        const struct mwifiex_pcie_card_reg *reg;
        u16 blksz_fw_dl;
        u16 tx_buf_size;
-       bool supports_fw_dump;
+       bool can_dump_fw;
        bool can_ext_scan;
 };
 
@@ -214,7 +214,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
        .reg            = &mwifiex_reg_8766,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
-       .supports_fw_dump = false,
+       .can_dump_fw = false,
        .can_ext_scan = true,
 };
 
@@ -223,7 +223,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
        .reg            = &mwifiex_reg_8897,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
-       .supports_fw_dump = true,
+       .can_dump_fw = true,
        .can_ext_scan = true,
 };
 
index 91e36cda9543e4cd20e1755572f8a31a24774b88..d10320f89bc16f0f87604fa3840a4a0b72e2530d 100644 (file)
@@ -47,6 +47,7 @@
 static u8 user_rmmod;
 
 static struct mwifiex_if_ops sdio_ops;
+static unsigned long iface_work_flags;
 
 static struct semaphore add_remove_card_sem;
 
@@ -105,8 +106,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                card->tx_buf_size = data->tx_buf_size;
                card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
                card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
-               card->supports_fw_dump = data->supports_fw_dump;
-               card->auto_tdls = data->auto_tdls;
+               card->can_dump_fw = data->can_dump_fw;
+               card->can_auto_tdls = data->can_auto_tdls;
                card->can_ext_scan = data->can_ext_scan;
        }
 
@@ -200,8 +201,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
        if (!adapter || !adapter->priv_num)
                return;
 
-       cancel_work_sync(&adapter->iface_work);
-
        if (user_rmmod) {
                if (adapter->is_suspended)
                        mwifiex_sdio_resume(adapter->dev);
@@ -1042,6 +1041,59 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
        return ret;
 }
 
+/*
+ * This function decode sdio aggreation pkt.
+ *
+ * Based on the the data block size and pkt_len,
+ * skb data will be decoded to few packets.
+ */
+static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
+                                   struct sk_buff *skb)
+{
+       u32 total_pkt_len, pkt_len;
+       struct sk_buff *skb_deaggr;
+       u32 pkt_type;
+       u16 blk_size;
+       u8 blk_num;
+       u8 *data;
+
+       data = skb->data;
+       total_pkt_len = skb->len;
+
+       while (total_pkt_len >= (SDIO_HEADER_OFFSET + INTF_HEADER_LEN)) {
+               if (total_pkt_len < adapter->sdio_rx_block_size)
+                       break;
+               blk_num = *(data + BLOCK_NUMBER_OFFSET);
+               blk_size = adapter->sdio_rx_block_size * blk_num;
+               if (blk_size > total_pkt_len) {
+                       dev_err(adapter->dev, "%s: error in pkt,\t"
+                               "blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
+                               __func__, blk_num, blk_size, total_pkt_len);
+                       break;
+               }
+               pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
+               pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET +
+                                        2));
+               if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
+                       dev_err(adapter->dev, "%s: error in pkt,\t"
+                               "pkt_len=%d, blk_size=%d\n",
+                               __func__, pkt_len, blk_size);
+                       break;
+               }
+               skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
+                                                        GFP_KERNEL | GFP_DMA);
+               if (!skb_deaggr)
+                       break;
+               skb_put(skb_deaggr, pkt_len);
+               memcpy(skb_deaggr->data, data + SDIO_HEADER_OFFSET, pkt_len);
+               skb_pull(skb_deaggr, INTF_HEADER_LEN);
+
+               mwifiex_handle_rx_packet(adapter, skb_deaggr);
+               data += blk_size;
+               total_pkt_len -= blk_size;
+       }
+}
+
 /*
  * This function decodes a received packet.
  *
@@ -1055,11 +1107,28 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
        u8 *cmd_buf;
        __le16 *curr_ptr = (__le16 *)skb->data;
        u16 pkt_len = le16_to_cpu(*curr_ptr);
+       struct mwifiex_rxinfo *rx_info;
 
-       skb_trim(skb, pkt_len);
-       skb_pull(skb, INTF_HEADER_LEN);
+       if (upld_typ != MWIFIEX_TYPE_AGGR_DATA) {
+               skb_trim(skb, pkt_len);
+               skb_pull(skb, INTF_HEADER_LEN);
+       }
 
        switch (upld_typ) {
+       case MWIFIEX_TYPE_AGGR_DATA:
+               dev_dbg(adapter->dev, "info: --- Rx: Aggr Data packet ---\n");
+               rx_info = MWIFIEX_SKB_RXCB(skb);
+               rx_info->buf_type = MWIFIEX_TYPE_AGGR_DATA;
+               if (adapter->rx_work_enabled) {
+                       skb_queue_tail(&adapter->rx_data_q, skb);
+                       atomic_inc(&adapter->rx_pending);
+                       adapter->data_received = true;
+               } else {
+                       mwifiex_deaggr_sdio_pkt(adapter, skb);
+                       dev_kfree_skb_any(skb);
+               }
+               break;
+
        case MWIFIEX_TYPE_DATA:
                dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n");
                if (adapter->rx_work_enabled) {
@@ -1127,17 +1196,17 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
  * provided there is space left, processed and finally uploaded.
  */
 static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
-                                            struct sk_buff *skb, u8 port)
+                                            u16 rx_len, u8 port)
 {
        struct sdio_mmc_card *card = adapter->card;
        s32 f_do_rx_aggr = 0;
        s32 f_do_rx_cur = 0;
        s32 f_aggr_cur = 0;
+       s32 f_post_aggr_cur = 0;
        struct sk_buff *skb_deaggr;
-       u32 pind;
-       u32 pkt_len, pkt_type, mport;
+       struct sk_buff *skb = NULL;
+       u32 pkt_len, pkt_type, mport, pind;
        u8 *curr_ptr;
-       u32 rx_len = skb->len;
 
        if ((card->has_control_mask) && (port == CTRL_PORT)) {
                /* Read the command Resp without aggr */
@@ -1164,12 +1233,12 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
 
                if (MP_RX_AGGR_IN_PROGRESS(card)) {
-                       if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len)) {
+                       if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) {
                                f_aggr_cur = 1;
                        } else {
                                /* No room in Aggr buf, do rx aggr now */
                                f_do_rx_aggr = 1;
-                               f_do_rx_cur = 1;
+                               f_post_aggr_cur = 1;
                        }
                } else {
                        /* Rx aggr not in progress */
@@ -1182,7 +1251,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
 
                if (MP_RX_AGGR_IN_PROGRESS(card)) {
                        f_do_rx_aggr = 1;
-                       if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len))
+                       if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len))
                                f_aggr_cur = 1;
                        else
                                /* No room in Aggr buf, do rx aggr now */
@@ -1195,7 +1264,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
        if (f_aggr_cur) {
                dev_dbg(adapter->dev, "info: current packet aggregation\n");
                /* Curr pkt can be aggregated */
-               mp_rx_aggr_setup(card, skb, port);
+               mp_rx_aggr_setup(card, rx_len, port);
 
                if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
                    mp_rx_aggr_port_limit_reached(card)) {
@@ -1238,16 +1307,29 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                curr_ptr = card->mpa_rx.buf;
 
                for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
+                       u32 *len_arr = card->mpa_rx.len_arr;
 
                        /* get curr PKT len & type */
                        pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]);
                        pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]);
 
                        /* copy pkt to deaggr buf */
-                       skb_deaggr = card->mpa_rx.skb_arr[pind];
+                       skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind],
+                                                                GFP_KERNEL |
+                                                                GFP_DMA);
+                       if (!skb_deaggr) {
+                               dev_err(adapter->dev, "skb allocation failure drop pkt len=%d type=%d\n",
+                                       pkt_len, pkt_type);
+                               curr_ptr += len_arr[pind];
+                               continue;
+                       }
 
-                       if ((pkt_type == MWIFIEX_TYPE_DATA) && (pkt_len <=
-                                        card->mpa_rx.len_arr[pind])) {
+                       skb_put(skb_deaggr, len_arr[pind]);
+
+                       if ((pkt_type == MWIFIEX_TYPE_DATA ||
+                            (pkt_type == MWIFIEX_TYPE_AGGR_DATA &&
+                             adapter->sdio_rx_aggr_enable)) &&
+                           (pkt_len <= len_arr[pind])) {
 
                                memcpy(skb_deaggr->data, curr_ptr, pkt_len);
 
@@ -1257,13 +1339,15 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                                mwifiex_decode_rx_packet(adapter, skb_deaggr,
                                                         pkt_type);
                        } else {
-                               dev_err(adapter->dev, "wrong aggr pkt:"
-                                       " type=%d len=%d max_len=%d\n",
+                               dev_err(adapter->dev, " drop wrong aggr pkt:\t"
+                                       "sdio_single_port_rx_aggr=%d\t"
+                                       "type=%d len=%d max_len=%d\n",
+                                       adapter->sdio_rx_aggr_enable,
                                        pkt_type, pkt_len,
-                                       card->mpa_rx.len_arr[pind]);
+                                       len_arr[pind]);
                                dev_kfree_skb_any(skb_deaggr);
                        }
-                       curr_ptr += card->mpa_rx.len_arr[pind];
+                       curr_ptr += len_arr[pind];
                }
                MP_RX_AGGR_BUF_RESET(card);
        }
@@ -1273,28 +1357,46 @@ rx_curr_single:
                dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
                        port, rx_len);
 
+               skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
+               if (!skb) {
+                       dev_err(adapter->dev, "single skb allocated fail,\t"
+                               "drop pkt port=%d len=%d\n", port, rx_len);
+                       if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
+                                                     card->mpa_rx.buf, rx_len,
+                                                     adapter->ioport + port))
+                               goto error;
+                       return 0;
+               }
+
+               skb_put(skb, rx_len);
+
                if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
                                              skb->data, skb->len,
                                              adapter->ioport + port))
                        goto error;
+               if (!adapter->sdio_rx_aggr_enable &&
+                   pkt_type == MWIFIEX_TYPE_AGGR_DATA) {
+                       dev_err(adapter->dev, "drop wrong pkt type %d\t"
+                               "current SDIO RX Aggr not enabled\n",
+                               pkt_type);
+                       dev_kfree_skb_any(skb);
+                       return 0;
+               }
 
                mwifiex_decode_rx_packet(adapter, skb, pkt_type);
        }
+       if (f_post_aggr_cur) {
+               dev_dbg(adapter->dev, "info: current packet aggregation\n");
+               /* Curr pkt can be aggregated */
+               mp_rx_aggr_setup(card, rx_len, port);
+       }
 
        return 0;
-
 error:
-       if (MP_RX_AGGR_IN_PROGRESS(card)) {
-               /* Multiport-aggregation transfer failed - cleanup */
-               for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
-                       /* copy pkt to deaggr buf */
-                       skb_deaggr = card->mpa_rx.skb_arr[pind];
-                       dev_kfree_skb_any(skb_deaggr);
-               }
+       if (MP_RX_AGGR_IN_PROGRESS(card))
                MP_RX_AGGR_BUF_RESET(card);
-       }
 
-       if (f_do_rx_cur)
+       if (f_do_rx_cur && skb)
                /* Single transfer pending. Free curr buff also */
                dev_kfree_skb_any(skb);
 
@@ -1356,8 +1458,9 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                     MWIFIEX_RX_DATA_BUF_SIZE)
                        return -1;
                rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
+               dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
 
-               skb = dev_alloc_skb(rx_len);
+               skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
                if (!skb)
                        return -1;
 
@@ -1447,27 +1550,16 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                                 1) / MWIFIEX_SDIO_BLOCK_SIZE;
                        if (rx_len <= INTF_HEADER_LEN ||
                            (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
-                            MWIFIEX_RX_DATA_BUF_SIZE) {
+                            card->mpa_rx.buf_size) {
                                dev_err(adapter->dev, "invalid rx_len=%d\n",
                                        rx_len);
                                return -1;
                        }
-                       rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
-
-                       skb = dev_alloc_skb(rx_len);
-
-                       if (!skb) {
-                               dev_err(adapter->dev, "%s: failed to alloc skb",
-                                       __func__);
-                               return -1;
-                       }
 
-                       skb_put(skb, rx_len);
-
-                       dev_dbg(adapter->dev, "info: rx_len = %d skb->len = %d\n",
-                               rx_len, skb->len);
+                       rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
+                       dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
 
-                       if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb,
+                       if (mwifiex_sdio_card_to_host_mp_aggr(adapter, rx_len,
                                                              port)) {
                                dev_err(adapter->dev, "card_to_host_mpa failed:"
                                        " int status=%#x\n", sdio_ireg);
@@ -1735,6 +1827,7 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
                                   u32 mpa_tx_buf_size, u32 mpa_rx_buf_size)
 {
        struct sdio_mmc_card *card = adapter->card;
+       u32 rx_buf_size;
        int ret = 0;
 
        card->mpa_tx.buf = kzalloc(mpa_tx_buf_size, GFP_KERNEL);
@@ -1745,13 +1838,15 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
 
        card->mpa_tx.buf_size = mpa_tx_buf_size;
 
-       card->mpa_rx.buf = kzalloc(mpa_rx_buf_size, GFP_KERNEL);
+       rx_buf_size = max_t(u32, mpa_rx_buf_size,
+                           (u32)SDIO_MAX_AGGR_BUF_SIZE);
+       card->mpa_rx.buf = kzalloc(rx_buf_size, GFP_KERNEL);
        if (!card->mpa_rx.buf) {
                ret = -1;
                goto error;
        }
 
-       card->mpa_rx.buf_size = mpa_rx_buf_size;
+       card->mpa_rx.buf_size = rx_buf_size;
 
 error:
        if (ret) {
@@ -1887,7 +1982,7 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
                return -1;
        }
 
-       adapter->auto_tdls = card->auto_tdls;
+       adapter->auto_tdls = card->can_auto_tdls;
        adapter->ext_scan = card->can_ext_scan;
        return ret;
 }
@@ -1950,6 +2045,7 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
                port, card->mp_data_port_mask);
 }
 
+static struct mwifiex_adapter *save_adapter;
 static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
@@ -2018,10 +2114,8 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
 }
 
 /* This function dump firmware memory to file */
-static void mwifiex_sdio_fw_dump_work(struct work_struct *work)
+static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
 {
-       struct mwifiex_adapter *adapter =
-                       container_of(work, struct mwifiex_adapter, iface_work);
        struct sdio_mmc_card *card = adapter->card;
        int ret = 0;
        unsigned int reg, reg_start, reg_end;
@@ -2032,7 +2126,7 @@ static void mwifiex_sdio_fw_dump_work(struct work_struct *work)
 
        mwifiex_dump_drv_info(adapter);
 
-       if (!card->supports_fw_dump)
+       if (!card->can_dump_fw)
                return;
 
        for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
@@ -2143,36 +2237,36 @@ done:
 
 static void mwifiex_sdio_work(struct work_struct *work)
 {
-       struct mwifiex_adapter *adapter =
-                       container_of(work, struct mwifiex_adapter, iface_work);
-
-       if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
-                              &adapter->iface_work_flags))
-               mwifiex_sdio_card_reset_work(adapter);
        if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
-                              &adapter->iface_work_flags))
-               mwifiex_sdio_fw_dump_work(work);
+                              &iface_work_flags))
+               mwifiex_sdio_fw_dump_work(save_adapter);
+       if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
+                              &iface_work_flags))
+               mwifiex_sdio_card_reset_work(save_adapter);
 }
 
+static DECLARE_WORK(sdio_work, mwifiex_sdio_work);
 /* This function resets the card */
 static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
 {
-       if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags))
+       save_adapter = adapter;
+       if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags))
                return;
 
-       set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags);
+       set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags);
 
-       schedule_work(&adapter->iface_work);
+       schedule_work(&sdio_work);
 }
 
 /* This function dumps FW information */
 static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
 {
-       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags))
+       save_adapter = adapter;
+       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
                return;
 
-       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags);
-       schedule_work(&adapter->iface_work);
+       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
+       schedule_work(&sdio_work);
 }
 
 /* Function to dump SDIO function registers and SDIO scratch registers in case
@@ -2288,9 +2382,9 @@ static struct mwifiex_if_ops sdio_ops = {
        .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
        .event_complete = mwifiex_sdio_event_complete,
        .card_reset = mwifiex_sdio_card_reset,
-       .iface_work = mwifiex_sdio_work,
        .fw_dump = mwifiex_sdio_fw_dump,
        .reg_dump = mwifiex_sdio_reg_dump,
+       .deaggr_pkt = mwifiex_deaggr_sdio_pkt,
 };
 
 /*
@@ -2327,6 +2421,7 @@ mwifiex_sdio_cleanup_module(void)
 
        /* Set the flag as user is removing this module. */
        user_rmmod = 1;
+       cancel_work_sync(&sdio_work);
 
        sdio_unregister_driver(&mwifiex_sdio);
 }
index 957cca24661828c4574f66bee9ac0ecd0126e5b8..6f645cf47369baddaa10bc6489837fa914a14a57 100644 (file)
@@ -67,6 +67,8 @@
 
 #define MWIFIEX_MP_AGGR_BUF_SIZE_16K   (16384)
 #define MWIFIEX_MP_AGGR_BUF_SIZE_32K   (32768)
+/* we leave one block of 256 bytes for DMA alignment*/
+#define MWIFIEX_MP_AGGR_BUF_SIZE_MAX    (65280)
 
 /* Misc. Config Register : Auto Re-enable interrupts */
 #define AUTO_RE_ENABLE_INT              BIT(4)
@@ -238,9 +240,6 @@ struct sdio_mmc_card {
        const struct mwifiex_sdio_card_reg *reg;
        u8 max_ports;
        u8 mp_agg_pkt_limit;
-       bool supports_sdio_new_mode;
-       bool has_control_mask;
-       bool supports_fw_dump;
        u16 tx_buf_size;
        u32 mp_tx_agg_buf_size;
        u32 mp_rx_agg_buf_size;
@@ -255,7 +254,10 @@ struct sdio_mmc_card {
        u8 curr_wr_port;
 
        u8 *mp_regs;
-       u8 auto_tdls;
+       bool supports_sdio_new_mode;
+       bool has_control_mask;
+       bool can_dump_fw;
+       bool can_auto_tdls;
        bool can_ext_scan;
 
        struct mwifiex_sdio_mpa_tx mpa_tx;
@@ -267,13 +269,13 @@ struct mwifiex_sdio_device {
        const struct mwifiex_sdio_card_reg *reg;
        u8 max_ports;
        u8 mp_agg_pkt_limit;
-       bool supports_sdio_new_mode;
-       bool has_control_mask;
-       bool supports_fw_dump;
        u16 tx_buf_size;
        u32 mp_tx_agg_buf_size;
        u32 mp_rx_agg_buf_size;
-       u8 auto_tdls;
+       bool supports_sdio_new_mode;
+       bool has_control_mask;
+       bool can_dump_fw;
+       bool can_auto_tdls;
        bool can_ext_scan;
 };
 
@@ -412,13 +414,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
        .reg = &mwifiex_reg_sd87xx,
        .max_ports = 16,
        .mp_agg_pkt_limit = 8,
-       .supports_sdio_new_mode = false,
-       .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-       .supports_fw_dump = false,
-       .auto_tdls = false,
+       .supports_sdio_new_mode = false,
+       .has_control_mask = true,
+       .can_dump_fw = false,
+       .can_auto_tdls = false,
        .can_ext_scan = false,
 };
 
@@ -427,13 +429,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
        .reg = &mwifiex_reg_sd87xx,
        .max_ports = 16,
        .mp_agg_pkt_limit = 8,
-       .supports_sdio_new_mode = false,
-       .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-       .supports_fw_dump = false,
-       .auto_tdls = false,
+       .supports_sdio_new_mode = false,
+       .has_control_mask = true,
+       .can_dump_fw = false,
+       .can_auto_tdls = false,
        .can_ext_scan = true,
 };
 
@@ -442,13 +444,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
        .reg = &mwifiex_reg_sd87xx,
        .max_ports = 16,
        .mp_agg_pkt_limit = 8,
-       .supports_sdio_new_mode = false,
-       .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-       .supports_fw_dump = false,
-       .auto_tdls = false,
+       .supports_sdio_new_mode = false,
+       .has_control_mask = true,
+       .can_dump_fw = false,
+       .can_auto_tdls = false,
        .can_ext_scan = true,
 };
 
@@ -457,13 +459,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
        .reg = &mwifiex_reg_sd8897,
        .max_ports = 32,
        .mp_agg_pkt_limit = 16,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
        .supports_sdio_new_mode = true,
        .has_control_mask = false,
-       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
-       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
-       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
-       .supports_fw_dump = true,
-       .auto_tdls = false,
+       .can_dump_fw = true,
+       .can_auto_tdls = false,
        .can_ext_scan = true,
 };
 
@@ -472,13 +474,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
        .reg = &mwifiex_reg_sd8887,
        .max_ports = 32,
        .mp_agg_pkt_limit = 16,
-       .supports_sdio_new_mode = true,
-       .has_control_mask = false,
-       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
-       .supports_fw_dump = false,
-       .auto_tdls = true,
+       .supports_sdio_new_mode = true,
+       .has_control_mask = false,
+       .can_dump_fw = false,
+       .can_auto_tdls = true,
        .can_ext_scan = true,
 };
 
@@ -492,8 +494,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-       .supports_fw_dump = false,
-       .auto_tdls = false,
+       .can_dump_fw = false,
+       .can_auto_tdls = false,
        .can_ext_scan = true,
 };
 
@@ -571,9 +573,9 @@ mp_tx_aggr_port_limit_reached(struct sdio_mmc_card *card)
 
 /* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
 static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card,
-                                   struct sk_buff *skb, u8 port)
+                                   u16 rx_len, u8 port)
 {
-       card->mpa_rx.buf_len += skb->len;
+       card->mpa_rx.buf_len += rx_len;
 
        if (!card->mpa_rx.pkt_cnt)
                card->mpa_rx.start_port = port;
@@ -586,8 +588,8 @@ static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card,
                else
                        card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1);
        }
-       card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = skb;
-       card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = skb->len;
+       card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = NULL;
+       card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = rx_len;
        card->mpa_rx.pkt_cnt++;
 }
 #endif /* _MWIFIEX_SDIO_H */
index f7d204ffd6e97c2444129252535e9fd3580959cc..49422f2a53809fe0c241de93afb231c8011871c3 100644 (file)
@@ -1370,22 +1370,29 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
                    struct mwifiex_ds_mef_cfg *mef)
 {
        struct host_cmd_ds_mef_cfg *mef_cfg = &cmd->params.mef_cfg;
+       struct mwifiex_fw_mef_entry *mef_entry = NULL;
        u8 *pos = (u8 *)mef_cfg;
+       u16 i;
 
        cmd->command = cpu_to_le16(HostCmd_CMD_MEF_CFG);
 
        mef_cfg->criteria = cpu_to_le32(mef->criteria);
        mef_cfg->num_entries = cpu_to_le16(mef->num_entries);
        pos += sizeof(*mef_cfg);
-       mef_cfg->mef_entry->mode = mef->mef_entry->mode;
-       mef_cfg->mef_entry->action = mef->mef_entry->action;
-       pos += sizeof(*(mef_cfg->mef_entry));
 
-       if (mwifiex_cmd_append_rpn_expression(priv, mef->mef_entry, &pos))
-               return -1;
+       for (i = 0; i < mef->num_entries; i++) {
+               mef_entry = (struct mwifiex_fw_mef_entry *)pos;
+               mef_entry->mode = mef->mef_entry[i].mode;
+               mef_entry->action = mef->mef_entry[i].action;
+               pos += sizeof(*mef_cfg->mef_entry);
+
+               if (mwifiex_cmd_append_rpn_expression(priv,
+                                                     &mef->mef_entry[i], &pos))
+                       return -1;
 
-       mef_cfg->mef_entry->exprsize =
-                       cpu_to_le16(pos - mef_cfg->mef_entry->expr);
+               mef_entry->exprsize =
+                       cpu_to_le16(pos - mef_entry->expr);
+       }
        cmd->size = cpu_to_le16((u16) (pos - (u8 *)mef_cfg) + S_DS_GEN);
 
        return 0;
@@ -1664,6 +1671,25 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
 
        return 0;
 }
+
+/* This function prepares command of sdio rx aggr info. */
+static int mwifiex_cmd_sdio_rx_aggr_cfg(struct host_cmd_ds_command *cmd,
+                                       u16 cmd_action, void *data_buf)
+{
+       struct host_cmd_sdio_sp_rx_aggr_cfg *cfg =
+                                       &cmd->params.sdio_rx_aggr_cfg;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_SDIO_SP_RX_AGGR_CFG);
+       cmd->size =
+               cpu_to_le16(sizeof(struct host_cmd_sdio_sp_rx_aggr_cfg) +
+                           S_DS_GEN);
+       cfg->action = cmd_action;
+       if (cmd_action == HostCmd_ACT_GEN_SET)
+               cfg->enable = *(u8 *)data_buf;
+
+       return 0;
+}
+
 /*
  * This function prepares the commands before sending them to the firmware.
  *
@@ -1901,6 +1927,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
                                                            data_buf);
                break;
+       case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
+               ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
+                                                  data_buf);
+               break;
        default:
                dev_err(priv->adapter->dev,
                        "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1940,6 +1970,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
        struct mwifiex_ds_auto_ds auto_ds;
        enum state_11d_t state_11d;
        struct mwifiex_ds_11n_tx_cfg tx_cfg;
+       u8 sdio_sp_rx_aggr_enable;
 
        if (first_sta) {
                if (priv->adapter->iface_type == MWIFIEX_PCIE) {
@@ -1983,6 +2014,22 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
                if (ret)
                        return -1;
 
+               /** Set SDIO Single Port RX Aggr Info */
+               if (priv->adapter->iface_type == MWIFIEX_SDIO &&
+                   ISSUPP_SDIO_SPA_ENABLED(priv->adapter->fw_cap_info)) {
+                       sdio_sp_rx_aggr_enable = true;
+                       ret = mwifiex_send_cmd(priv,
+                                              HostCmd_CMD_SDIO_SP_RX_AGGR_CFG,
+                                              HostCmd_ACT_GEN_SET, 0,
+                                              &sdio_sp_rx_aggr_enable,
+                                              true);
+                       if (ret) {
+                               dev_err(priv->adapter->dev,
+                                       "error while enabling SP aggregation..disable it");
+                               adapter->sdio_rx_aggr_enable = false;
+                       }
+               }
+
                /* Reconfigure tx buf size */
                ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
                                       HostCmd_ACT_GEN_SET, 0,
index 5f8da5924666275615482fd07de75fa46d4bfb4a..88dc6b672ef43adb5cc8c1b836b19a1bed0db5d1 100644 (file)
@@ -90,6 +90,10 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
        case HostCmd_CMD_MAC_CONTROL:
                break;
 
+       case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
+               dev_err(priv->adapter->dev, "SDIO RX single-port aggregation Not support\n");
+               break;
+
        default:
                break;
        }
@@ -943,6 +947,20 @@ static int mwifiex_ret_cfg_data(struct mwifiex_private *priv,
        return 0;
 }
 
+/** This function handles the command response of sdio rx aggr */
+static int mwifiex_ret_sdio_rx_aggr_cfg(struct mwifiex_private *priv,
+                                       struct host_cmd_ds_command *resp)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct host_cmd_sdio_sp_rx_aggr_cfg *cfg =
+                               &resp->params.sdio_rx_aggr_cfg;
+
+       adapter->sdio_rx_aggr_enable = cfg->enable;
+       adapter->sdio_rx_block_size = le16_to_cpu(cfg->block_size);
+
+       return 0;
+}
+
 /*
  * This function handles the command responses.
  *
@@ -1124,6 +1142,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                break;
        case HostCmd_CMD_CHAN_REPORT_REQUEST:
                break;
+       case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
+               ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
+               break;
        default:
                dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
                        resp->command);
index 80ffe74124969a2410e2a1ce382d0f521f035238..0dc7a1d3993d325a15f84fa447afaa884349eabc 100644 (file)
@@ -135,7 +135,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
                cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
                                      GFP_KERNEL);
        }
-       memset(priv->cfg_bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->cfg_bssid);
 
        mwifiex_stop_net_dev_queue(priv->netdev, adapter);
        if (netif_carrier_ok(priv->netdev))
@@ -312,7 +312,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                        adapter->ps_state = PS_STATE_AWAKE;
                                        adapter->pm_wakeup_card_req = false;
                                        adapter->pm_wakeup_fw_try = false;
-                                       del_timer_sync(&adapter->wakeup_timer);
+                                       del_timer(&adapter->wakeup_timer);
                                        break;
                                }
                                if (!mwifiex_send_null_packet
@@ -327,7 +327,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                adapter->ps_state = PS_STATE_AWAKE;
                adapter->pm_wakeup_card_req = false;
                adapter->pm_wakeup_fw_try = false;
-               del_timer_sync(&adapter->wakeup_timer);
+               del_timer(&adapter->wakeup_timer);
 
                break;
 
index ac93557cbdc96ec951fd91f2f4d581b8cd9a7a5b..a245f444aeec17e23c027b60d027239050fb4497 100644 (file)
@@ -80,23 +80,29 @@ EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
 int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
                       struct mwifiex_tx_param *tx_param)
 {
-       int ret = -1;
+       int hroom, ret = -1;
        struct mwifiex_adapter *adapter = priv->adapter;
        u8 *head_ptr;
        struct txpd *local_tx_pd = NULL;
 
+       hroom = (adapter->iface_type == MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
+
        if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
                head_ptr = mwifiex_process_uap_txpd(priv, skb);
        else
                head_ptr = mwifiex_process_sta_txpd(priv, skb);
 
+       if ((adapter->data_sent || adapter->tx_lock_flag) && head_ptr) {
+               skb_queue_tail(&adapter->tx_data_q, skb);
+               atomic_inc(&adapter->tx_queued);
+               return 0;
+       }
+
        if (head_ptr) {
                if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
-                       local_tx_pd =
-                               (struct txpd *) (head_ptr + INTF_HEADER_LEN);
+                       local_tx_pd = (struct txpd *)(head_ptr + hroom);
                if (adapter->iface_type == MWIFIEX_USB) {
                        adapter->data_sent = true;
-                       skb_pull(skb, INTF_HEADER_LEN);
                        ret = adapter->if_ops.host_to_card(adapter,
                                                           MWIFIEX_USB_EP_DATA,
                                                           skb, NULL);
@@ -142,6 +148,123 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
        return ret;
 }
 
+static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
+                               struct sk_buff *skb,
+                               struct mwifiex_tx_param *tx_param)
+{
+       struct txpd *local_tx_pd = NULL;
+       u8 *head_ptr = skb->data;
+       int ret = 0;
+       struct mwifiex_private *priv;
+       struct mwifiex_txinfo *tx_info;
+
+       tx_info = MWIFIEX_SKB_TXCB(skb);
+       priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
+                                     tx_info->bss_type);
+       if (!priv) {
+               dev_err(adapter->dev, "data: priv not found. Drop TX packet\n");
+               adapter->dbg.num_tx_host_to_card_failure++;
+               mwifiex_write_data_complete(adapter, skb, 0, 0);
+               return ret;
+       }
+       if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
+               if (adapter->iface_type == MWIFIEX_USB)
+                       local_tx_pd = (struct txpd *)head_ptr;
+               else
+                       local_tx_pd = (struct txpd *) (head_ptr +
+                               INTF_HEADER_LEN);
+       }
+
+       if (adapter->iface_type == MWIFIEX_USB) {
+               adapter->data_sent = true;
+               ret = adapter->if_ops.host_to_card(adapter,
+                                                  MWIFIEX_USB_EP_DATA,
+                                                  skb, NULL);
+       } else {
+               ret = adapter->if_ops.host_to_card(adapter,
+                                                  MWIFIEX_TYPE_DATA,
+                                                  skb, tx_param);
+       }
+       switch (ret) {
+       case -ENOSR:
+               dev_err(adapter->dev, "data: -ENOSR is returned\n");
+               break;
+       case -EBUSY:
+               if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+                   (adapter->pps_uapsd_mode) &&
+                   (adapter->tx_lock_flag)) {
+                       priv->adapter->tx_lock_flag = false;
+                       if (local_tx_pd)
+                               local_tx_pd->flags = 0;
+               }
+               skb_queue_head(&adapter->tx_data_q, skb);
+               if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+                       atomic_add(tx_info->aggr_num, &adapter->tx_queued);
+               else
+                       atomic_inc(&adapter->tx_queued);
+               dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+               break;
+       case -1:
+               if (adapter->iface_type != MWIFIEX_PCIE)
+                       adapter->data_sent = false;
+               dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
+                       ret);
+               adapter->dbg.num_tx_host_to_card_failure++;
+               mwifiex_write_data_complete(adapter, skb, 0, ret);
+               break;
+       case -EINPROGRESS:
+               if (adapter->iface_type != MWIFIEX_PCIE)
+                       adapter->data_sent = false;
+               break;
+       case 0:
+               mwifiex_write_data_complete(adapter, skb, 0, ret);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+static int
+mwifiex_dequeue_tx_queue(struct mwifiex_adapter *adapter)
+{
+       struct sk_buff *skb, *skb_next;
+       struct mwifiex_txinfo *tx_info;
+       struct mwifiex_tx_param tx_param;
+
+       skb = skb_dequeue(&adapter->tx_data_q);
+       if (!skb)
+               return -1;
+
+       tx_info = MWIFIEX_SKB_TXCB(skb);
+       if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+               atomic_sub(tx_info->aggr_num, &adapter->tx_queued);
+       else
+               atomic_dec(&adapter->tx_queued);
+
+       if (!skb_queue_empty(&adapter->tx_data_q))
+               skb_next = skb_peek(&adapter->tx_data_q);
+       else
+               skb_next = NULL;
+       tx_param.next_pkt_len = ((skb_next) ? skb_next->len : 0);
+       if (!tx_param.next_pkt_len) {
+               if (!mwifiex_wmm_lists_empty(adapter))
+                       tx_param.next_pkt_len = 1;
+       }
+       return mwifiex_host_to_card(adapter, skb, &tx_param);
+}
+
+void
+mwifiex_process_tx_queue(struct mwifiex_adapter *adapter)
+{
+       do {
+               if (adapter->data_sent || adapter->tx_lock_flag)
+                       break;
+               if (mwifiex_dequeue_tx_queue(adapter))
+                       break;
+       } while (!skb_queue_empty(&adapter->tx_data_q));
+}
+
 /*
  * Packet send completion callback handler.
  *
@@ -179,8 +302,11 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                priv->stats.tx_errors++;
        }
 
-       if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+       if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
                atomic_dec_return(&adapter->pending_bridged_pkts);
+               if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+                       goto done;
+       }
 
        if (aggr)
                /* For skb_aggr, do not wake up tx queue */
index 223873022ffe24120054cf13c8a8466c78bcdd0d..fd8027f200a0ddd61c178ee1f7107e7e0931c1c1 100644 (file)
@@ -193,7 +193,7 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
                dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
                        recv_length, status);
                if (status == -EINPROGRESS) {
-                       queue_work(adapter->workqueue, &adapter->main_work);
+                       mwifiex_queue_main_work(adapter);
 
                        /* urb for data_ep is re-submitted now;
                         * urb for cmd_ep will be re-submitted in callback
@@ -262,7 +262,7 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
                                            urb->status ? -1 : 0);
        }
 
-       queue_work(adapter->workqueue, &adapter->main_work);
+       mwifiex_queue_main_work(adapter);
 
        return;
 }
@@ -1006,7 +1006,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
 {
        /* Simulation of HS_AWAKE event */
        adapter->pm_wakeup_fw_try = false;
-       del_timer_sync(&adapter->wakeup_timer);
+       del_timer(&adapter->wakeup_timer);
        adapter->pm_wakeup_card_req = false;
        adapter->ps_state = PS_STATE_AWAKE;
 
index 308550611f22fe5924c6d51f39e398b5e736ab89..b8a45872354d7f46c330da734fa5f2aef8b4c7c0 100644 (file)
@@ -367,6 +367,13 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
        if (!skb)
                return -1;
 
+       if (!priv->mgmt_frame_mask ||
+           priv->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) {
+               dev_dbg(priv->adapter->dev,
+                       "do not receive mgmt frames on uninitialized intf");
+               return -1;
+       }
+
        rx_pd = (struct rxpd *)skb->data;
 
        skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
@@ -624,3 +631,26 @@ void mwifiex_hist_data_reset(struct mwifiex_private *priv)
        for (ix = 0; ix < MWIFIEX_MAX_SIG_STRENGTH; ix++)
                atomic_set(&phist_data->sig_str[ix], 0);
 }
+
+void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags)
+{
+       struct sk_buff *skb;
+       int buf_len, pad;
+
+       buf_len = rx_len + MWIFIEX_RX_HEADROOM + MWIFIEX_DMA_ALIGN_SZ;
+
+       skb = __dev_alloc_skb(buf_len, flags);
+
+       if (!skb)
+               return NULL;
+
+       skb_reserve(skb, MWIFIEX_RX_HEADROOM);
+
+       pad = MWIFIEX_ALIGN_ADDR(skb->data, MWIFIEX_DMA_ALIGN_SZ) -
+             (long)skb->data;
+
+       skb_reserve(skb, pad);
+
+       return skb;
+}
+EXPORT_SYMBOL_GPL(mwifiex_alloc_dma_align_buf);
index ef717acec8b76f3fc45cd1b210cccd5b3948f7d7..b2e99569a0f8b659b756179a7cec340de68952e1 100644 (file)
@@ -157,6 +157,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
 
                ra_list->is_11n_enabled = 0;
                ra_list->tdls_link = false;
+               ra_list->ba_status = BA_SETUP_NONE;
+               ra_list->amsdu_in_ampdu = false;
                if (!mwifiex_queuing_ra_based(priv)) {
                        if (mwifiex_get_tdls_link_status(priv, ra) ==
                            TDLS_SETUP_COMPLETE) {
@@ -574,7 +576,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
  * This function retrieves a particular RA list node, matching with the
  * given TID and RA address.
  */
-static struct mwifiex_ra_list_tbl *
+struct mwifiex_ra_list_tbl *
 mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
                            const u8 *ra_addr)
 {
@@ -730,7 +732,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        } else {
                memcpy(ra, skb->data, ETH_ALEN);
                if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
-                       memset(ra, 0xff, ETH_ALEN);
+                       eth_broadcast_addr(ra);
                ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
        }
 
@@ -942,14 +944,11 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
        struct mwifiex_ra_list_tbl *ptr;
        struct mwifiex_tid_tbl *tid_ptr;
        atomic_t *hqp;
-       unsigned long flags_bss, flags_ra;
+       unsigned long flags_ra;
        int i, j;
 
        /* check the BSS with highest priority first */
        for (j = adapter->priv_num - 1; j >= 0; --j) {
-               spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
-                                 flags_bss);
-
                /* iterate over BSS with the equal priority */
                list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
                                    &adapter->bss_prio_tbl[j].bss_prio_head,
@@ -985,19 +984,15 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
                        }
                }
 
-               spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
-                                      flags_bss);
        }
 
        return NULL;
 
 found:
-       /* holds bss_prio_lock / ra_list_spinlock */
+       /* holds ra_list_spinlock */
        if (atomic_read(hqp) > i)
                atomic_set(hqp, i);
        spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
-       spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
-                              flags_bss);
 
        *priv = priv_tmp;
        *tid = tos_to_tid[i];
@@ -1179,6 +1174,14 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
 
        skb = skb_dequeue(&ptr->skb_head);
 
+       if (adapter->data_sent || adapter->tx_lock_flag) {
+               spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+                                      ra_list_flags);
+               skb_queue_tail(&adapter->tx_data_q, skb);
+               atomic_inc(&adapter->tx_queued);
+               return;
+       }
+
        if (!skb_queue_empty(&ptr->skb_head))
                skb_next = skb_peek(&ptr->skb_head);
        else
@@ -1276,13 +1279,13 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
        }
 
        if (!ptr->is_11n_enabled ||
-           mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
-           priv->wps.session_enable) {
+               ptr->ba_status ||
+               priv->wps.session_enable) {
                if (ptr->is_11n_enabled &&
-                   mwifiex_is_ba_stream_setup(priv, ptr, tid) &&
-                   mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) &&
-                   mwifiex_is_amsdu_allowed(priv, tid) &&
-                   mwifiex_is_11n_aggragation_possible(priv, ptr,
+                       ptr->ba_status &&
+                       ptr->amsdu_in_ampdu &&
+                       mwifiex_is_amsdu_allowed(priv, tid) &&
+                       mwifiex_is_11n_aggragation_possible(priv, ptr,
                                                        adapter->tx_buf_size))
                        mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
                        /* ra_list_spinlock has been freed in
@@ -1329,11 +1332,16 @@ void
 mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
 {
        do {
-               /* Check if busy */
-               if (adapter->data_sent || adapter->tx_lock_flag)
-                       break;
-
                if (mwifiex_dequeue_tx_packet(adapter))
                        break;
+               if (adapter->iface_type != MWIFIEX_SDIO) {
+                       if (adapter->data_sent ||
+                           adapter->tx_lock_flag)
+                               break;
+               } else {
+                       if (atomic_read(&adapter->tx_queued) >=
+                           MWIFIEX_MAX_PKTS_TXQ)
+                               break;
+               }
        } while (!mwifiex_wmm_lists_empty(adapter));
 }
index 569bd73f33c5f001f93241fe1c81b40e172230e0..48ece0b355919d3c3a4278dfc1727fc391f5848d 100644 (file)
@@ -127,4 +127,6 @@ mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
                            const u8 *ra_addr);
 u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
 
+struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private
+                                       *priv, u8 tid, const u8 *ra_addr);
 #endif /* !_MWIFIEX_WMM_H_ */
index f9b1218c761a4b7a187c239239365fe077649624..95921167b53f74a8577710a44afcd1f71b06142a 100644 (file)
@@ -1277,7 +1277,7 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
        struct mwl8k_priv *priv = hw->priv;
 
        priv->capture_beacon = false;
-       memset(priv->capture_bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->capture_bssid);
 
        /*
         * Use GFP_ATOMIC as rxq_process is called from
index 6d831d4d1b5f90f9c727166d4271da57a2cf245d..f6fa3f4e294f5dec2115e09021a487938accbff7 100644 (file)
@@ -2,7 +2,7 @@ config HERMES
        tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
        depends on (PPC_PMAC || PCI || PCMCIA)
        depends on CFG80211
-       select CFG80211_WEXT
+       select CFG80211_WEXT_EXPORT
        select WIRELESS_EXT
        select WEXT_SPY
        select WEXT_PRIV
index 0ca8b1455cd93aba54bc8e845af53f9c3a82190c..77e6c53040a35f80cf8e8092b1c8ebcbcd4bf6f4 100644 (file)
@@ -228,7 +228,7 @@ MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
 MODULE_DESCRIPTION("Driver for the Apple Airport wireless card.");
 MODULE_LICENSE("Dual MPL/GPL");
 
-static struct of_device_id airport_match[] = {
+static const struct of_device_id airport_match[] = {
        {
        .name           = "radio",
        },
index 6abdaf0aa052253800697eb1631d100683ba2ec7..1d4dae422106c673351439e3f680feeff2adb06b 100644 (file)
@@ -168,7 +168,7 @@ static int orinoco_ioctl_setwap(struct net_device *dev,
        if (is_zero_ether_addr(ap_addr->sa_data) ||
            is_broadcast_ether_addr(ap_addr->sa_data)) {
                priv->bssid_fixed = 0;
-               memset(priv->desired_bssid, 0, ETH_ALEN);
+               eth_zero_addr(priv->desired_bssid);
 
                /* "off" means keep existing connection */
                if (ap_addr->sa_data[0] == 0) {
index 5367d510b22d7862360eaf04b7a994cca701e7b7..275408eaf95e6d54f006b853475229ea105ee9dd 100644 (file)
@@ -671,7 +671,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot, u8 idx, u8 len,
        if (addr)
                memcpy(rxkey->mac, addr, ETH_ALEN);
        else
-               memset(rxkey->mac, ~0, ETH_ALEN);
+               eth_broadcast_addr(rxkey->mac);
 
        switch (algo) {
        case P54_CRYPTO_WEP:
index b9250d75d2539d827aec114a83e8056faec389aa..e79674f73dc5766cda5bfae2c1762a5aeb98f863 100644 (file)
@@ -182,7 +182,7 @@ static int p54_start(struct ieee80211_hw *dev)
        if (err)
                goto out;
 
-       memset(priv->bssid, ~0, ETH_ALEN);
+       eth_broadcast_addr(priv->bssid);
        priv->mode = NL80211_IFTYPE_MONITOR;
        err = p54_setup_mac(priv);
        if (err) {
@@ -274,8 +274,8 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
                wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ);
        }
        priv->mode = NL80211_IFTYPE_MONITOR;
-       memset(priv->mac_addr, 0, ETH_ALEN);
-       memset(priv->bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->mac_addr);
+       eth_zero_addr(priv->bssid);
        p54_setup_mac(priv);
        mutex_unlock(&priv->conf_mutex);
 }
@@ -794,7 +794,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
        init_completion(&priv->beacon_comp);
        INIT_DELAYED_WORK(&priv->work, p54_work);
 
-       memset(&priv->mc_maclist[0], ~0, ETH_ALEN);
+       eth_broadcast_addr(priv->mc_maclist[0]);
        priv->curchan = NULL;
        p54_reset_stats(priv);
        return dev;
index 8330fa33e50b1e2f933f813ee187c407184780ae..477f86354dc5a7ff8a324717a16093cd9fc55f81 100644 (file)
@@ -808,7 +808,7 @@ static int ray_dev_init(struct net_device *dev)
 
        /* copy mac and broadcast addresses to linux device */
        memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
-       memset(dev->broadcast, 0xff, ETH_ALEN);
+       eth_broadcast_addr(dev->broadcast);
 
        dev_dbg(&link->dev, "ray_dev_init ending\n");
        return 0;
index 60d44ce9c0173b48af894b14264e2f761c34de44..d72ff8e7125d4525d1761c60d3828dba22b63296 100644 (file)
@@ -199,13 +199,13 @@ enum ndis_80211_pmkid_cand_list_flag_bits {
 
 struct ndis_80211_auth_request {
        __le32 length;
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        u8 padding[2];
        __le32 flags;
 } __packed;
 
 struct ndis_80211_pmkid_candidate {
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        u8 padding[2];
        __le32 flags;
 } __packed;
@@ -248,7 +248,7 @@ struct ndis_80211_conf {
 
 struct ndis_80211_bssid_ex {
        __le32 length;
-       u8 mac[6];
+       u8 mac[ETH_ALEN];
        u8 padding[2];
        struct ndis_80211_ssid ssid;
        __le32 privacy;
@@ -283,7 +283,7 @@ struct ndis_80211_key {
        __le32 size;
        __le32 index;
        __le32 length;
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        u8 padding[6];
        u8 rsc[8];
        u8 material[32];
@@ -292,7 +292,7 @@ struct ndis_80211_key {
 struct ndis_80211_remove_key {
        __le32 size;
        __le32 index;
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        u8 padding[2];
 } __packed;
 
@@ -310,7 +310,7 @@ struct ndis_80211_assoc_info {
        struct req_ie {
                __le16 capa;
                __le16 listen_interval;
-               u8 cur_ap_address[6];
+               u8 cur_ap_address[ETH_ALEN];
        } req_ie;
        __le32 req_ie_length;
        __le32 offset_req_ies;
@@ -338,7 +338,7 @@ struct ndis_80211_capability {
 } __packed;
 
 struct ndis_80211_bssid_info {
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        u8 pmkid[16];
 } __packed;
 
@@ -1037,7 +1037,7 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
                              bssid, &len);
 
        if (ret != 0)
-               memset(bssid, 0, ETH_ALEN);
+               eth_zero_addr(bssid);
 
        return ret;
 }
@@ -1391,7 +1391,7 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
        priv->encr_keys[index].len = key_len;
        priv->encr_keys[index].cipher = cipher;
        memcpy(&priv->encr_keys[index].material, key, key_len);
-       memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->encr_keys[index].bssid);
 
        return 0;
 }
@@ -1466,7 +1466,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
        } else {
                /* group key */
                if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
-                       memset(ndis_key.bssid, 0xff, ETH_ALEN);
+                       eth_broadcast_addr(ndis_key.bssid);
                else
                        get_bssid(usbdev, ndis_key.bssid);
        }
@@ -1486,7 +1486,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
        if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY)
                memcpy(&priv->encr_keys[index].bssid, ndis_key.bssid, ETH_ALEN);
        else
-               memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN);
+               eth_broadcast_addr(priv->encr_keys[index].bssid);
 
        if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY)
                priv->encr_tx_key_index = index;
@@ -2280,7 +2280,7 @@ static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev,
        netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code);
 
        priv->connected = false;
-       memset(priv->bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->bssid);
 
        return deauthenticate(usbdev);
 }
@@ -2392,7 +2392,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
        netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n");
 
        priv->connected = false;
-       memset(priv->bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->bssid);
 
        return deauthenticate(usbdev);
 }
@@ -2857,7 +2857,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev)
 
        if (priv->connected) {
                priv->connected = false;
-               memset(priv->bssid, 0, ETH_ALEN);
+               eth_zero_addr(priv->bssid);
 
                deauthenticate(usbdev);
 
index 8444313eabe2bbc1bdcb660c5a82bff17f3fdb90..6ec2466b52b6ccd686341e2ed53df35f0b0370e0 100644 (file)
@@ -233,6 +233,7 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
 {
        __le32 *reg;
        u32 fw_mode;
+       int ret;
 
        reg = kmalloc(sizeof(*reg), GFP_KERNEL);
        if (reg == NULL)
@@ -242,11 +243,14 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
         * magic value USB_MODE_AUTORUN (0x11) to the device, thus the
         * returned value would be invalid.
         */
-       rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
-                                USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
-                                reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE);
+       ret = rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
+                                      USB_VENDOR_REQUEST_IN, 0,
+                                      USB_MODE_AUTORUN, reg, sizeof(*reg),
+                                      REGISTER_TIMEOUT_FIRMWARE);
        fw_mode = le32_to_cpu(*reg);
        kfree(reg);
+       if (ret < 0)
+               return ret;
 
        if ((fw_mode & 0x00000003) == 2)
                return 1;
@@ -289,6 +293,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
        if (retval) {
                rt2x00_info(rt2x00dev,
                            "Firmware loading not required - NIC in AutoRun mode\n");
+               __clear_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
        } else {
                rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
                                              data + offset, length);
@@ -374,7 +379,6 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
 static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
 {
        rt2800_disable_radio(rt2x00dev);
-       rt2x00usb_disable_radio(rt2x00dev);
 }
 
 static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
@@ -1040,6 +1044,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x07d1, 0x3c17) },
        { USB_DEVICE(0x2001, 0x3317) },
        { USB_DEVICE(0x2001, 0x3c1b) },
+       { USB_DEVICE(0x2001, 0x3c25) },
        /* Draytek */
        { USB_DEVICE(0x07fa, 0x7712) },
        /* DVICO */
index 8f85fbd5f237eff576e343009d1aeefe26a39f02..569363da00a2999fca8cac2d425347d759a62b08 100644 (file)
@@ -199,7 +199,7 @@ static inline void rt2x00usb_register_read(struct rt2x00_dev *rt2x00dev,
                                           const unsigned int offset,
                                           u32 *value)
 {
-       __le32 reg;
+       __le32 reg = 0;
        rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
                                      USB_VENDOR_REQUEST_IN, offset,
                                      &reg, sizeof(reg));
@@ -219,7 +219,7 @@ static inline void rt2x00usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
                                                const unsigned int offset,
                                                u32 *value)
 {
-       __le32 reg;
+       __le32 reg = 0;
        rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
                                       USB_VENDOR_REQUEST_IN, offset,
                                       &reg, sizeof(reg), REGISTER_TIMEOUT);
index c6cb49c3ee32cff431bc679cf3a09ab92d1de4e0..ff9a4bfd45154aaaf37055d82eba1d32b0276a83 100644 (file)
@@ -45,9 +45,6 @@ enum ap_peer {
 #define RTL_TX_DESC_SIZE       32
 #define RTL_TX_HEADER_SIZE     (RTL_TX_DESC_SIZE + RTL_TX_DUMMY_SIZE)
 
-#define HT_AMSDU_SIZE_4K       3839
-#define HT_AMSDU_SIZE_8K       7935
-
 #define MAX_BIT_RATE_40MHZ_MCS15       300     /* Mbps */
 #define MAX_BIT_RATE_40MHZ_MCS7                150     /* Mbps */
 
@@ -61,9 +58,6 @@ enum ap_peer {
 #define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS9   390     /* Mbps */
 #define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS7   293     /* Mbps */
 
-#define RTL_RATE_COUNT_LEGACY          12
-#define RTL_CHANNEL_COUNT              14
-
 #define FRAME_OFFSET_FRAME_CONTROL     0
 #define FRAME_OFFSET_DURATION          2
 #define FRAME_OFFSET_ADDRESS1          4
@@ -129,7 +123,6 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
 u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
 
 void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_watch_dog_timer_callback(unsigned long data);
 int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
index 35508087c0c5ed4f80a969dfffd989c5a6d9eeed..e2e647d511c17b56786e886c83f71de140078dbd 100644 (file)
 
 #define CAM_CONTENT_COUNT                              8
 
-#define CFG_DEFAULT_KEY                                        BIT(5)
 #define CFG_VALID                                      BIT(15)
 
 #define PAIRWISE_KEYIDX                                        0
 #define CAM_PAIRWISE_KEY_POSITION                      4
 
-#define        CAM_CONFIG_USEDK                                1
 #define        CAM_CONFIG_NO_USEDK                             0
 
 void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
index a31a12775f1a0ff114c63e9ee17dfddc31eee273..3b3a88b53b119909112a806ee71ab4d4bfa67a79 100644 (file)
@@ -195,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
        if (!(support_remote_wakeup &&
              rtlhal->enter_pnp_sleep)) {
                mac->link_state = MAC80211_NOLINK;
-               memset(mac->bssid, 0, 6);
+               eth_zero_addr(mac->bssid);
                mac->vendor = PEER_UNKNOWN;
 
                /* reset sec info */
@@ -357,7 +357,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
        mac->p2p = 0;
        mac->vif = NULL;
        mac->link_state = MAC80211_NOLINK;
-       memset(mac->bssid, 0, ETH_ALEN);
+       eth_zero_addr(mac->bssid);
        mac->vendor = PEER_UNKNOWN;
        mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
        rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
@@ -1157,7 +1157,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                        if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
                                rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
                        mac->link_state = MAC80211_NOLINK;
-                       memset(mac->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(mac->bssid);
                        mac->vendor = PEER_UNKNOWN;
                        mac->mode = 0;
 
index 7b64e34f421e0d4ec41c072f07ed096c7d53ce83..82733c6b8c46e66d79a6d1a74cf1903214744ef7 100644 (file)
@@ -33,8 +33,6 @@
        FIF_FCSFAIL | \
        FIF_BCN_PRBRESP_PROMISC)
 
-#define RTL_SUPPORTED_CTRL_FILTER      0xFF
-
 #define DM_DIG_THRESH_HIGH             40
 #define DM_DIG_THRESH_LOW              35
 #define DM_FALSEALARM_THRESH_LOW       400
index fdab8240a5d79da027f641ad52c170a658febdb7..be02e7894c61d20bbb0eedd70d539d0da66120ff 100644 (file)
 #define PG_STATE_WORD_3                        0x10
 #define PG_STATE_DATA                  0x20
 
-#define PG_SWBYTE_H                    0x01
-#define PG_SWBYTE_L                    0x02
-
-#define _POWERON_DELAY_
-#define _PRE_EXECUTE_READ_CMD_
-
 #define EFUSE_REPEAT_THRESHOLD_                3
 #define EFUSE_ERROE_HANDLE             1
 
index d9ea9d0c79a526694210f7f9f46c7e9747a31285..0532b985244445d81f925dfd960950688a95de44 100644 (file)
 #ifndef __RTL92C_DEF_H__
 #define __RTL92C_DEF_H__
 
-#define HAL_RETRY_LIMIT_INFRA                          48
-#define HAL_RETRY_LIMIT_AP_ADHOC                       7
-
-#define RESET_DELAY_8185                               20
-
-#define RT_IBSS_INT_MASKS      (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS                (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE                          10
-#define NUM_OF_PAGES_IN_FW                             0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA                   0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD                    0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN                    0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB                    0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM                        0x00
-
-#define MAX_LINES_HWCONFIG_TXT                         1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT                    256
-
-#define SW_THREE_WIRE                                  0
-#define HW_THREE_WIRE                                  2
-
-#define BT_DEMO_BOARD                                  0
-#define BT_QA_BOARD                                    1
-#define BT_FPGA                                                2
-
 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE                        0
 #define HAL_PRIME_CHNL_OFFSET_LOWER                    1
 #define HAL_PRIME_CHNL_OFFSET_UPPER                    2
 
-#define MAX_H2C_QUEUE_NUM                              10
-
 #define RX_MPDU_QUEUE                                  0
 #define RX_CMD_QUEUE                                   1
-#define RX_MAX_QUEUE                                   2
-#define AC2QUEUEID(_AC)                                        (_AC)
 
 #define        C2H_RX_CMD_HDR_LEN                              8
 #define        GET_C2H_CMD_CMD_LEN(__prxhdr)                   \
index f2b9713c456e14e8a89726ec989113a4635a1767..86ce5b1930e6d2824b66f7d5c95210c59fca6cb7 100644 (file)
@@ -30,6 +30,7 @@
 #include "../cam.h"
 #include "../ps.h"
 #include "../pci.h"
+#include "../pwrseqcmd.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -566,7 +567,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_VIQEN);
                                break;
                        case AC3_VO:
-                               acm_ctrl &= (~ACMHW_BEQEN);
+                               acm_ctrl &= (~ACMHW_VOQEN);
                                break;
                        default:
                                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -885,7 +886,7 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
 
        rtl_write_word(rtlpriv, REG_CR, 0x2ff);
        rtl_write_byte(rtlpriv, REG_CR+1, 0x06);
-       rtl_write_byte(rtlpriv, REG_CR+2, 0x00);
+       rtl_write_byte(rtlpriv, MSR, 0x00);
 
        if (!rtlhal->mac_func_enable) {
                if (_rtl88ee_llt_table_init(hw) == false) {
@@ -1277,7 +1278,7 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
                         mode);
        }
 
-       rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+       rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if (mode == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index 3f6c59cdeababd42c1ab5692ffe15c9d2b897956..a2bb02c7b837679455daf529fc84b5214068a33b 100644 (file)
@@ -452,9 +452,10 @@ static void handle_branch1(struct ieee80211_hw *hw, u16 arraylen,
                                READ_NEXT_PAIR(v1, v2, i);
                                while (v2 != 0xDEAD &&
                                       v2 != 0xCDEF &&
-                                      v2 != 0xCDCD && i < arraylen - 2)
+                                      v2 != 0xCDCD && i < arraylen - 2) {
                                        _rtl8188e_config_bb_reg(hw, v1, v2);
                                        READ_NEXT_PAIR(v1, v2, i);
+                               }
 
                                while (v2 != 0xDEAD && i < arraylen - 2)
                                        READ_NEXT_PAIR(v1, v2, i);
index 5c1472d88fd4496f123d43675f506c137a2dd92b..0eca030e32383d3d00342fe2ff18cab5d219675e 100644 (file)
@@ -27,7 +27,6 @@
 #define __RTL92C_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 
 void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
                                     u8 bandwidth);
index 9b660df6fd712fd517638723bb0616b17b9df257..690a7a1675e2019c16932e08b4cfa86731e03566 100644 (file)
 #ifndef __RTL92C_DEF_H__
 #define __RTL92C_DEF_H__
 
-#define HAL_RETRY_LIMIT_INFRA                          48
-#define HAL_RETRY_LIMIT_AP_ADHOC                       7
-
 #define        PHY_RSSI_SLID_WIN_MAX                           100
 #define        PHY_LINKQUALITY_SLID_WIN_MAX                    20
 #define        PHY_BEACON_RSSI_SLID_WIN_MAX                    10
 
-#define RESET_DELAY_8185                               20
-
-#define RT_IBSS_INT_MASKS      (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS                (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE                          10
-#define NUM_OF_PAGES_IN_FW                             0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA                   0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD                    0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN                    0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB                    0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM                        0x00
-
-#define MAX_LINES_HWCONFIG_TXT                         1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT                    256
-
-#define SW_THREE_WIRE                                  0
-#define HW_THREE_WIRE                                  2
-
-#define BT_DEMO_BOARD                                  0
-#define BT_QA_BOARD                                    1
-#define BT_FPGA                                                2
-
 #define RX_SMOOTH_FACTOR                               20
 
 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE                        0
 #define HAL_PRIME_CHNL_OFFSET_LOWER                    1
 #define HAL_PRIME_CHNL_OFFSET_UPPER                    2
 
-#define MAX_H2C_QUEUE_NUM                              10
-
 #define RX_MPDU_QUEUE                                  0
 #define RX_CMD_QUEUE                                   1
-#define RX_MAX_QUEUE                                   2
-#define AC2QUEUEID(_AC)                                        (_AC)
 
 #define        C2H_RX_CMD_HDR_LEN                              8
 #define        GET_C2H_CMD_CMD_LEN(__prxhdr)           \
index 303b299376c95b46da33dbec2d1159cec746a93c..04eb5c3f84640d4702402fc7d1dfeeb79f790c32 100644 (file)
@@ -363,7 +363,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        acm_ctrl &= (~AcmHw_ViqEn);
                                        break;
                                case AC3_VO:
-                                       acm_ctrl &= (~AcmHw_BeqEn);
+                                       acm_ctrl &= (~AcmHw_VoqEn);
                                        break;
                                default:
                                        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index d8fe68b389d213e17b133b7044fec647b315f2b8..ebd72cae10b6ecf680deb765a9ce26e779d7c6fc 100644 (file)
@@ -31,7 +31,6 @@
 #define __RTL92C_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 #define RF6052_MAX_PATH                        2
 
 void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
index fe4b699a12f5b4d6bdc9d883584d356e9670e24e..d310d55d800efd584f9dd9a6d8e143539c0c777c 100644 (file)
@@ -1364,7 +1364,7 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
                         "Network type %d not supported!\n", type);
                goto error_out;
        }
-       rtl_write_byte(rtlpriv, (MSR), bt_msr);
+       rtl_write_byte(rtlpriv, MSR, bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
@@ -1471,8 +1471,7 @@ static void _InitBeaconParameters(struct ieee80211_hw *hw)
                rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
 }
 
-static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable,
-                                   bool Linked)
+static void _beacon_function_enable(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
@@ -1517,7 +1516,7 @@ void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
                rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
                rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
        }
-       _beacon_function_enable(hw, true, true);
+       _beacon_function_enable(hw);
 }
 
 void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
@@ -1589,6 +1588,8 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        case HW_VAR_DATA_FILTER:
                *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
                break;
+       case HAL_DEF_WOWLAN:
+               break;
        default:
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "switch case not processed\n");
@@ -1871,7 +1872,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        acm_ctrl &= (~AcmHw_ViqEn);
                                        break;
                                case AC3_VO:
-                                       acm_ctrl &= (~AcmHw_BeqEn);
+                                       acm_ctrl &= (~AcmHw_VoqEn);
                                        break;
                                default:
                                        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index c1e33b0228c0176e1ae31f5a8f99085a4f840397..67588083e6cc7ae9009c57a1fd8b07bd178c6e4d 100644 (file)
@@ -32,8 +32,6 @@
 
 #define H2C_RA_MASK    6
 
-#define LLT_POLLING_LLT_THRESHOLD              20
-#define LLT_POLLING_READY_TIMEOUT_COUNT                100
 #define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER                255
 
 #define RX_PAGE_SIZE_REG_VALUE                 PBP_128
index 133e395b7401fc80f286792441c72fbb7f56a397..adb810794eef71e27080cd9590a07911eba5123b 100644 (file)
@@ -497,7 +497,7 @@ int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
                         "Network type %d not supported!\n", type);
                return -EOPNOTSUPP;
        }
-       rtl_write_byte(rtlpriv, (REG_CR + 2), value);
+       rtl_write_byte(rtlpriv, MSR, value);
        return 0;
 }
 
index 11b439d6b67167c11e1529919bc5f07dfe96e060..6f987de5b4413ee74b71fbb7ddef9cb39929954a 100644 (file)
@@ -31,7 +31,6 @@
 #define __RTL92CU_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 #define RF6052_MAX_PATH                        2
 
 void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
index 90a714c189a8e5694fa0c84ec8868739b436135f..23806c243a53174db28aa1b0a99e5a01119206d7 100644 (file)
@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+       {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
        {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
@@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
        {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
        {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+       {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
        {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
        {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
        {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
index 939c905f547fd179c8e1af49b266893bdbc14694..0a443ed17cf4760dcc38475741450e6328196cce 100644 (file)
 #define        MAX_MSS_DENSITY_1T                              0x0A
 
 #define RF6052_MAX_TX_PWR                              0x3F
-#define RF6052_MAX_REG                                 0x3F
 #define RF6052_MAX_PATH                                        2
 
-#define HAL_RETRY_LIMIT_INFRA                          48
-#define HAL_RETRY_LIMIT_AP_ADHOC                       7
-
 #define        PHY_RSSI_SLID_WIN_MAX                           100
 #define        PHY_LINKQUALITY_SLID_WIN_MAX                    20
 #define        PHY_BEACON_RSSI_SLID_WIN_MAX                    10
 
-#define RESET_DELAY_8185                               20
-
-#define RT_IBSS_INT_MASKS      (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
 #define RT_AC_INT_MASKS                (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
 
-#define NUM_OF_FIRMWARE_QUEUE                          10
-#define NUM_OF_PAGES_IN_FW                             0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA                   0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD                    0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN                    0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB                    0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM                        0x00
-
-#define MAX_LINES_HWCONFIG_TXT                         1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT                    256
-
-#define SW_THREE_WIRE                                  0
-#define HW_THREE_WIRE                                  2
-
-#define BT_DEMO_BOARD                                  0
-#define BT_QA_BOARD                                    1
-#define BT_FPGA                                                2
-
 #define RX_SMOOTH_FACTOR                               20
 
 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE                        0
 #define HAL_PRIME_CHNL_OFFSET_LOWER                    1
 #define HAL_PRIME_CHNL_OFFSET_UPPER                    2
 
-#define MAX_H2C_QUEUE_NUM                              10
-
 #define RX_MPDU_QUEUE                                  0
 #define RX_CMD_QUEUE                                   1
-#define RX_MAX_QUEUE                                   2
 
 #define        C2H_RX_CMD_HDR_LEN                              8
 #define        GET_C2H_CMD_CMD_LEN(__prxhdr)                   \
index 01bcc2d218dc5d215a1f0e6125699a007d94b574..f49b60d314502d7565eea0badb7666b8e5e76a55 100644 (file)
@@ -1126,7 +1126,7 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
                break;
 
        }
-       rtl_write_byte(rtlpriv, REG_CR + 2, bt_msr);
+       rtl_write_byte(rtlpriv, MSR, bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index b461b3128da581bdb17ee53b1e5e9238472ca95b..da0a6125f314b7c582eefb5ea297fd2efd593ca0 100644 (file)
@@ -562,7 +562,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_VIQEN);
                                break;
                        case AC3_VO:
-                               acm_ctrl &= (~ACMHW_BEQEN);
+                               acm_ctrl &= (~ACMHW_VOQEN);
                                break;
                        default:
                                RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
@@ -1510,7 +1510,7 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw,
                         mode);
        }
 
-       rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+       rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if (mode == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index 8bdeed3c064ebb43e3141bb4305d4832c7f4e3c3..039c0133ad6b5a1b8d6869ded381454a886eb43d 100644 (file)
@@ -27,7 +27,6 @@
 #define __RTL92E_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 
 void rtl92ee_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
                                      u8 bandwidth);
index ef87c09b77d0c877ad10d3beddbc8045f0d9de43..41466f957cdc055b4f00588ab95f1281c8ea2d77 100644 (file)
@@ -31,7 +31,6 @@
 
 #define RX_MPDU_QUEUE                          0
 #define RX_CMD_QUEUE                           1
-#define RX_MAX_QUEUE                           2
 
 #define SHORT_SLOT_TIME                                9
 #define NON_SHORT_SLOT_TIME                    20
index 5761d5b49e39e4e9cb2ce703f39578ad1cf99e74..12b0978ba4faf26161e57f34d61644149544fa9d 100644 (file)
@@ -293,7 +293,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        acm_ctrl &= (~AcmHw_ViqEn);
                                        break;
                                case AC3_VO:
-                                       acm_ctrl &= (~AcmHw_BeqEn);
+                                       acm_ctrl &= (~AcmHw_VoqEn);
                                        break;
                                default:
                                        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -1204,7 +1204,7 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
        if (type != NL80211_IFTYPE_AP &&
            rtlpriv->mac80211.link_state < MAC80211_LINKED)
                bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK;
-       rtl_write_byte(rtlpriv, (MSR), bt_msr);
+       rtl_write_byte(rtlpriv, MSR, bt_msr);
 
        temp = rtl_read_dword(rtlpriv, TCR);
        rtl_write_dword(rtlpriv, TCR, temp & (~BIT(8)));
index 94bdd4bbca5dfcfb6aa537fd70365180df2dd8bf..bcdf2273688ebad1224e99166036c75c859adfbe 100644 (file)
 #ifndef __RTL8723E_DEF_H__
 #define __RTL8723E_DEF_H__
 
-#define HAL_RETRY_LIMIT_INFRA                          48
-#define HAL_RETRY_LIMIT_AP_ADHOC                       7
-
-#define RESET_DELAY_8185                                       20
-
-#define RT_IBSS_INT_MASKS      (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS                (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE                          10
-#define NUM_OF_PAGES_IN_FW                                     0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA           0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD                    0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT           0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH           0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN                    0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB                    0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM         0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM         0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM         0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM         0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM                0x00
-
-#define MAX_LINES_HWCONFIG_TXT                         1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT                    256
-
-#define SW_THREE_WIRE                                          0
-#define HW_THREE_WIRE                                          2
-
-#define BT_DEMO_BOARD                                          0
-#define BT_QA_BOARD                                                    1
-#define BT_FPGA                                                                2
-
 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE                0
 #define HAL_PRIME_CHNL_OFFSET_LOWER                    1
 #define HAL_PRIME_CHNL_OFFSET_UPPER                    2
 
-#define MAX_H2C_QUEUE_NUM                                      10
-
 #define RX_MPDU_QUEUE                                          0
 #define RX_CMD_QUEUE                                           1
-#define RX_MAX_QUEUE                                           2
-#define AC2QUEUEID(_AC)                                                (_AC)
 
 #define        C2H_RX_CMD_HDR_LEN                                      8
 #define        GET_C2H_CMD_CMD_LEN(__prxhdr)           \
index aa085462d0e9592d26d7d57418d451bbbd9ae8ae..67bb47d77b68c5d45bd6bb6dc0f0721c2461fa2c 100644 (file)
@@ -362,7 +362,7 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        acm_ctrl &= (~ACMHW_VIQEN);
                                        break;
                                case AC3_VO:
-                                       acm_ctrl &= (~ACMHW_BEQEN);
+                                       acm_ctrl &= (~ACMHW_VOQEN);
                                        break;
                                default:
                                        RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
@@ -1183,7 +1183,7 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw,
                         mode);
        }
 
-       rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+       rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if (mode == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index f3f45b16361f3b98f1a374356290ee7283952903..7b44ebc0fac978d4b746e052583aa5cc96d38426 100644 (file)
@@ -27,7 +27,6 @@
 #define __RTL8723E_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 
 void rtl8723e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
                                       u8 bandwidth);
index 6dad28e77bbb6c6f07f53ff2ea38e39996e9cd6e..b681af3c7a355d66fb411c71dcdcbba5bb8dfc01 100644 (file)
@@ -603,7 +603,7 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_VIQEN);
                                break;
                        case AC3_VO:
-                               acm_ctrl &= (~ACMHW_BEQEN);
+                               acm_ctrl &= (~ACMHW_VOQEN);
                                break;
                        default:
                                RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
@@ -1558,7 +1558,7 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
                         mode);
        }
 
-       rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+       rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if (mode == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index a6fea106ced4a92f3d08045f63a6ce1876ac883d..f423e157020ffb631f31806ac6cc25347afd2359 100644 (file)
@@ -27,7 +27,6 @@
 #define __RTL8723BE_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 
 void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
                                        u8 bandwidth);
index ee7c208bd070944850c5f560fcaff58d432e1a18..dfbdf539de1a1fc0b6163543c462082c6301a9c4 100644 (file)
 #define        WIFI_NAV_UPPER_US                               30000
 #define HAL_92C_NAV_UPPER_UNIT                 128
 
-#define HAL_RETRY_LIMIT_INFRA                          48
-#define HAL_RETRY_LIMIT_AP_ADHOC                       7
-
-#define RESET_DELAY_8185                                       20
-
-#define RT_IBSS_INT_MASKS      (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS                (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE                          10
-#define NUM_OF_PAGES_IN_FW                                     0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA           0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD                    0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT           0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH           0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN                    0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB                    0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM         0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM         0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM         0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM         0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM                0x00
-
 #define MAX_RX_DMA_BUFFER_SIZE                         0x3E80
 
-#define MAX_LINES_HWCONFIG_TXT                         1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT                    256
-
-#define SW_THREE_WIRE                                          0
-#define HW_THREE_WIRE                                          2
-
-#define BT_DEMO_BOARD                                          0
-#define BT_QA_BOARD                                                    1
-#define BT_FPGA                                                                2
-
 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE                0
 #define HAL_PRIME_CHNL_OFFSET_LOWER                    1
 #define HAL_PRIME_CHNL_OFFSET_UPPER                    2
 
-#define MAX_H2C_QUEUE_NUM                                      10
-
 #define RX_MPDU_QUEUE                                          0
 #define RX_CMD_QUEUE                                           1
-#define RX_MAX_QUEUE                                           2
-#define AC2QUEUEID(_AC)                                                (_AC)
 
 #define MAX_RX_DMA_BUFFER_SIZE_8812    0x3E80
 
index 8ec8200002c7311025b3ae645c9956bf08c44a17..8704eee9f3a495108e93135d6e306a090c322bc7 100644 (file)
@@ -423,7 +423,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_BSSID+4);
                break;
        case HW_VAR_MEDIA_STATUS:
-               val[0] = rtl_read_byte(rtlpriv, REG_CR+2) & 0x3;
+               val[0] = rtl_read_byte(rtlpriv, MSR) & 0x3;
                break;
        case HW_VAR_SLOT_TIME:
                *((u8 *)(val)) = mac->slot_time;
@@ -667,7 +667,7 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_VIQEN);
                                break;
                        case AC3_VO:
-                               acm_ctrl &= (~ACMHW_BEQEN);
+                               acm_ctrl &= (~ACMHW_VOQEN);
                                break;
                        default:
                                RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
@@ -1515,7 +1515,7 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
                                      (u8 *)(&support_remote_wakeup));
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
-                "boundary=0x%#X, NPQ_RQPNValue=0x%#X, RQPNValue=0x%#X\n",
+                "boundary=%#X, NPQ_RQPNValue=%#X, RQPNValue=%#X\n",
                  boundary, npq_rqpn_value, rqpn_val);
 
        /* stop PCIe DMA
@@ -2178,7 +2178,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
                return 1;
        }
 
-       rtl_write_byte(rtlpriv, (MSR), bt_msr);
+       rtl_write_byte(rtlpriv, MSR, bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
        if ((bt_msr & 0xfc) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
index d9582ee1c335462de324f9d347cdb96be73a971c..efd22bd0b139f2880aa5fd547f81c5976846cb4c 100644 (file)
@@ -27,7 +27,6 @@
 #define __RTL8821AE_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 
 void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
                                        u8 bandwidth);
index 72af4b9ee32b3f24f350acf03b188710d02f254f..174743aef9431c16673ff69f4d1ff79e47ecf752 100644 (file)
@@ -64,6 +64,20 @@ static u16 odm_cfo(char value)
        return ret_val;
 }
 
+static u8 _rtl8821ae_evm_dbm_jaguar(char value)
+{
+       char ret_val = value;
+
+       /* -33dB~0dB to 33dB ~ 0dB*/
+       if (ret_val == -128)
+               ret_val = 127;
+       else if (ret_val < 0)
+               ret_val = 0 - ret_val;
+
+       ret_val  = ret_val >> 1;
+       return ret_val;
+}
+
 static void query_rxphystatus(struct ieee80211_hw *hw,
                              struct rtl_stats *pstatus, u8 *pdesc,
                              struct rx_fwinfo_8821ae *p_drvinfo,
@@ -246,7 +260,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
 
                for (i = 0; i < max_spatial_stream; i++) {
                        evm = rtl_evm_db_to_percentage(p_phystrpt->rxevm[i]);
-                       evmdbm = rtl_evm_dbm_jaguar(p_phystrpt->rxevm[i]);
+                       evmdbm = _rtl8821ae_evm_dbm_jaguar(p_phystrpt->rxevm[i]);
 
                        if (bpacket_match_bssid) {
                                /* Fill value in RFD, Get the first
index 2d0736a09fc0e0a590724ff4415134018b3f8293..d8b30690b00de35aad2595a4e8d28952b9b2ffb4 100644 (file)
@@ -39,15 +39,8 @@ EXPORT_SYMBOL(rtl_query_rxpwrpercentage);
 
 u8 rtl_evm_db_to_percentage(char value)
 {
-       char ret_val;
-       ret_val = value;
+       char ret_val = clamp(-value, 0, 33) * 3;
 
-       if (ret_val >= 0)
-               ret_val = 0;
-       if (ret_val <= -33)
-               ret_val = -33;
-       ret_val = 0 - ret_val;
-       ret_val *= 3;
        if (ret_val == 99)
                ret_val = 100;
 
@@ -55,21 +48,6 @@ u8 rtl_evm_db_to_percentage(char value)
 }
 EXPORT_SYMBOL(rtl_evm_db_to_percentage);
 
-u8 rtl_evm_dbm_jaguar(char value)
-{
-       char ret_val = value;
-
-       /* -33dB~0dB to 33dB ~ 0dB*/
-       if (ret_val == -128)
-               ret_val = 127;
-       else if (ret_val < 0)
-               ret_val = 0 - ret_val;
-
-       ret_val  = ret_val >> 1;
-       return ret_val;
-}
-EXPORT_SYMBOL(rtl_evm_dbm_jaguar);
-
 static long rtl_translate_todbm(struct ieee80211_hw *hw,
                         u8 signal_strength_index)
 {
index aa4eec80ccf7a7588e3ec58f817172130fcc4a1d..2b57dffef572f019ac42d5d2465bb82e82bc29ce 100644 (file)
@@ -35,7 +35,6 @@
 
 u8 rtl_query_rxpwrpercentage(char antpower);
 u8 rtl_evm_db_to_percentage(char value);
-u8 rtl_evm_dbm_jaguar(char value);
 long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig);
 void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
                         struct rtl_stats *pstatus);
index 46ee956d0235dc79dbb8130242b5b2586fd4db5e..f0188c83c79f7d6027bdee6372768d3657347a2a 100644 (file)
@@ -701,12 +701,18 @@ free:
 
 static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
 {
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
        struct urb *urb;
 
        usb_kill_anchored_urbs(&rtlusb->rx_submitted);
 
        tasklet_kill(&rtlusb->rx_work_tasklet);
+       cancel_work_sync(&rtlpriv->works.lps_change_work);
+
+       flush_workqueue(rtlpriv->works.rtl_wq);
+       destroy_workqueue(rtlpriv->works.rtl_wq);
+
        skb_queue_purge(&rtlusb->rx_queue);
 
        while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
@@ -794,8 +800,6 @@ static void rtl_usb_cleanup(struct ieee80211_hw *hw)
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
        struct ieee80211_tx_info *txinfo;
 
-       SET_USB_STOP(rtlusb);
-
        /* clean up rx stuff. */
        _rtl_usb_cleanup_rx(hw);
 
@@ -834,7 +838,6 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
        cancel_work_sync(&rtlpriv->works.fill_h2c_cmd);
        /* Enable software */
        SET_USB_STOP(rtlusb);
-       rtl_usb_deinit(hw);
        rtlpriv->cfg->ops->hw_disable(hw);
 }
 
@@ -1147,9 +1150,9 @@ void rtl_usb_disconnect(struct usb_interface *intf)
 
        if (unlikely(!rtlpriv))
                return;
-
        /* just in case driver is removed before firmware callback */
        wait_for_completion(&rtlpriv->firmware_loading_complete);
+       clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
        /*ieee80211_unregister_hw will call ops_stop */
        if (rtlmac->mac80211_registered == 1) {
                ieee80211_unregister_hw(hw);
index d4ba009ac9aa62b7d9c24404dda707a132030291..d1e9a13be910b584d5e17394822ba3c1ff2a9ba4 100644 (file)
@@ -468,7 +468,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
        wl1251_tx_flush(wl);
        wl1251_power_off(wl);
 
-       memset(wl->bssid, 0, ETH_ALEN);
+       eth_zero_addr(wl->bssid);
        wl->listen_int = 1;
        wl->bss_type = MAX_BSS_TYPE;
 
@@ -547,7 +547,7 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
        mutex_lock(&wl->mutex);
        wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface");
        wl->vif = NULL;
-       memset(wl->bssid, 0, ETH_ALEN);
+       eth_zero_addr(wl->bssid);
        mutex_unlock(&wl->mutex);
 }
 
index c93fae95baac87e1775714c122a32ab35bfbda3c..5fbd2230f372f2a17d763be90efa3228bca5bdbd 100644 (file)
@@ -139,7 +139,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
 
-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
 
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
                                  AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
index c28f0685419597ef0179d496594417eaa630c154..548bb9e7e91ec054ad7da9022d325e42b007a4cf 100644 (file)
@@ -77,7 +77,7 @@ static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel,
        wl1271_debug(DEBUG_EVENT,
                     "SMART_CONFIG_SYNC_EVENT_ID, freq: %d (chan: %d band %d)",
                     freq, sync_channel, sync_band);
-       skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, 20,
+       skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, NULL, 20,
                                          WLCORE_VENDOR_EVENT_SC_SYNC,
                                          GFP_KERNEL);
 
@@ -98,7 +98,7 @@ static int wlcore_smart_config_decode_event(struct wl1271 *wl,
        wl1271_debug(DEBUG_EVENT, "SMART_CONFIG_DECODE_EVENT_ID");
        wl1271_dump_ascii(DEBUG_EVENT, "SSID:", ssid, ssid_len);
 
-       skb = cfg80211_vendor_event_alloc(wl->hw->wiphy,
+       skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, NULL,
                                          ssid_len + pwd_len + 20,
                                          WLCORE_VENDOR_EVENT_SC_DECODE,
                                          GFP_KERNEL);
index c26fc2106e5bfc3f44f8c7d2cda483824b0ea284..68919f8d4310455fad623381ee16e2f1c96dac69 100644 (file)
@@ -367,7 +367,7 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
        wl->links[*hlid].allocated_pkts = 0;
        wl->links[*hlid].prev_freed_pkts = 0;
        wl->links[*hlid].ba_bitmap = 0;
-       memset(wl->links[*hlid].addr, 0, ETH_ALEN);
+       eth_zero_addr(wl->links[*hlid].addr);
 
        /*
         * At this point op_tx() will not add more packets to the queues. We
@@ -1293,7 +1293,7 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        hdr->frame_control = cpu_to_le16(fc);
        memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
        memcpy(hdr->addr2, vif->addr, ETH_ALEN);
-       memset(hdr->addr3, 0xff, ETH_ALEN);
+       eth_broadcast_addr(hdr->addr3);
 
        ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP,
                                      skb->data, skb->len, 0,
index 0f2cfb0d2a9ec38fe013872e6d4339c2db1345e3..bf14676e6515002b8b2982f507386e8999b356e9 100644 (file)
@@ -26,8 +26,8 @@
 
 #include "wlcore.h"
 
-int wl1271_format_buffer(char __user *userbuf, size_t count,
-                        loff_t *ppos, char *fmt, ...);
+__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
+                                       loff_t *ppos, char *fmt, ...);
 
 int wl1271_debugfs_init(struct wl1271 *wl);
 void wl1271_debugfs_exit(struct wl1271 *wl);
index 589fa256256b8ca423bf424544056557c1c7e0ce..8a495b318b6f23bf66b19f4d77e557506cab5b0f 100644 (file)
@@ -238,6 +238,8 @@ struct xenvif {
        unsigned int num_queues; /* active queues, resource allocated */
        unsigned int stalled_queues;
 
+       struct xenbus_watch credit_watch;
+
        spinlock_t lock;
 
 #ifdef CONFIG_DEBUG_FS
@@ -260,6 +262,8 @@ static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
        return to_xenbus_device(vif->dev->dev.parent);
 }
 
+void xenvif_tx_credit_callback(unsigned long data);
+
 struct xenvif *xenvif_alloc(struct device *parent,
                            domid_t domid,
                            unsigned int handle);
index 3aa8648080c8dee1133b9faf0b6ac0c2ad538a9b..1a83e190fc15e4158b5e441cc267ad149d465abe 100644 (file)
@@ -437,7 +437,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
-       memset(dev->dev_addr, 0xFF, ETH_ALEN);
+       eth_broadcast_addr(dev->dev_addr);
        dev->dev_addr[0] &= ~0x01;
 
        netif_carrier_off(dev);
@@ -463,6 +463,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec  = 0UL;
        init_timer(&queue->credit_timeout);
+       queue->credit_timeout.function = xenvif_tx_credit_callback;
        queue->credit_window_start = get_jiffies_64();
 
        queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
index 997cf0901ac2dc50d92f26ea2c5c4e51511b8362..b8c471813f4ccdf30513775e33b0cd22231cf145 100644 (file)
@@ -642,7 +642,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
        queue->remaining_credit = min(max_credit, max_burst);
 }
 
-static void tx_credit_callback(unsigned long data)
+void xenvif_tx_credit_callback(unsigned long data)
 {
        struct xenvif_queue *queue = (struct xenvif_queue *)data;
        tx_add_credit(queue);
@@ -1165,8 +1165,6 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
        if (size > queue->remaining_credit) {
                queue->credit_timeout.data     =
                        (unsigned long)queue;
-               queue->credit_timeout.function =
-                       tx_credit_callback;
                mod_timer(&queue->credit_timeout,
                          next_credit);
                queue->credit_window_start = next_credit;
index 794204e34fba4fb74d1c21f2fef0837ef340feaa..3d8dbf5f2d396aa8dde8745afe98d8b4f9febd67 100644 (file)
@@ -41,6 +41,7 @@ static void connect(struct backend_info *be);
 static int read_xenbus_vif_flags(struct backend_info *be);
 static int backend_create_xenvif(struct backend_info *be);
 static void unregister_hotplug_status_watch(struct backend_info *be);
+static void xen_unregister_watchers(struct xenvif *vif);
 static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state);
 
@@ -232,6 +233,7 @@ static int netback_remove(struct xenbus_device *dev)
        unregister_hotplug_status_watch(be);
        if (be->vif) {
                kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+               xen_unregister_watchers(be->vif);
                xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
                xenvif_free(be->vif);
                be->vif = NULL;
@@ -430,6 +432,7 @@ static int backend_create_xenvif(struct backend_info *be)
 static void backend_disconnect(struct backend_info *be)
 {
        if (be->vif) {
+               xen_unregister_watchers(be->vif);
 #ifdef CONFIG_DEBUG_FS
                xenvif_debugfs_delif(be->vif);
 #endif /* CONFIG_DEBUG_FS */
@@ -645,6 +648,59 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
        return 0;
 }
 
+static void xen_net_rate_changed(struct xenbus_watch *watch,
+                               const char **vec, unsigned int len)
+{
+       struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
+       struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
+       unsigned long   credit_bytes;
+       unsigned long   credit_usec;
+       unsigned int queue_index;
+
+       xen_net_read_rate(dev, &credit_bytes, &credit_usec);
+       for (queue_index = 0; queue_index < vif->num_queues; queue_index++) {
+               struct xenvif_queue *queue = &vif->queues[queue_index];
+
+               queue->credit_bytes = credit_bytes;
+               queue->credit_usec = credit_usec;
+               if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
+                       queue->remaining_credit > queue->credit_bytes) {
+                       queue->remaining_credit = queue->credit_bytes;
+               }
+       }
+}
+
+static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
+{
+       int err = 0;
+       char *node;
+       unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
+
+       node = kmalloc(maxlen, GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+       snprintf(node, maxlen, "%s/rate", dev->nodename);
+       vif->credit_watch.node = node;
+       vif->credit_watch.callback = xen_net_rate_changed;
+       err = register_xenbus_watch(&vif->credit_watch);
+       if (err) {
+               pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
+               kfree(node);
+               vif->credit_watch.node = NULL;
+               vif->credit_watch.callback = NULL;
+       }
+       return err;
+}
+
+static void xen_unregister_watchers(struct xenvif *vif)
+{
+       if (vif->credit_watch.node) {
+               unregister_xenbus_watch(&vif->credit_watch);
+               kfree(vif->credit_watch.node);
+               vif->credit_watch.node = NULL;
+       }
+}
+
 static void unregister_hotplug_status_watch(struct backend_info *be)
 {
        if (be->have_hotplug_status_watch) {
@@ -709,6 +765,7 @@ static void connect(struct backend_info *be)
        }
 
        xen_net_read_rate(dev, &credit_bytes, &credit_usec);
+       xen_register_watchers(dev, be->vif);
        read_xenbus_vif_flags(be);
 
        /* Use the number of queues requested by the frontend */
index e9b960f0ff32c8af2ff404a138780ff751bf4572..720aaf6313d296bec9b9a4826f1240b0eb4c0940 100644 (file)
@@ -1008,8 +1008,7 @@ err:
 
 static int xennet_change_mtu(struct net_device *dev, int mtu)
 {
-       int max = xennet_can_sg(dev) ?
-               XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
+       int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
 
        if (mtu > max)
                return -EINVAL;
@@ -1279,8 +1278,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        netdev->ethtool_ops = &xennet_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);
 
-       netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
-
        np->netdev = netdev;
 
        netif_carrier_off(netdev);
index ad2906919d4589f4edbc0cd599a2fe7aa2938922..78a7dcbec7d8990ac37adad938a0aff3420423e2 100644 (file)
@@ -450,12 +450,17 @@ static struct of_bus *of_match_bus(struct device_node *np)
        return NULL;
 }
 
-static int of_empty_ranges_quirk(void)
+static int of_empty_ranges_quirk(struct device_node *np)
 {
        if (IS_ENABLED(CONFIG_PPC)) {
-               /* To save cycles, we cache the result */
+               /* To save cycles, we cache the result for global "Mac" setting */
                static int quirk_state = -1;
 
+               /* PA-SEMI sdc DT bug */
+               if (of_device_is_compatible(np, "1682m-sdc"))
+                       return true;
+
+               /* Make quirk cached */
                if (quirk_state < 0)
                        quirk_state =
                                of_machine_is_compatible("Power Macintosh") ||
@@ -490,7 +495,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
         * This code is only enabled on powerpc. --gcl
         */
        ranges = of_get_property(parent, rprop, &rlen);
-       if (ranges == NULL && !of_empty_ranges_quirk()) {
+       if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
                pr_debug("OF: no ranges; cannot translate\n");
                return 1;
        }
index 1bd43053b8c774505d7d3037d46a96925a446643..0c064485d1c2c47e3c84b2ab25fd524bf200ea33 100644 (file)
@@ -88,7 +88,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
        return 0;
 }
 
-static int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
+int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
 {
        u32 addr;
        int ret;
@@ -108,6 +108,7 @@ static int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
 
        return addr;
 }
+EXPORT_SYMBOL(of_mdio_parse_addr);
 
 /**
  * of_mdiobus_register - Register mii_bus and create PHYs from the device tree
index f8a76090cbca1e8bbf694a4fbc16a54ef60490d5..da7bae9915523195def56ead30b186881c025fa4 100644 (file)
@@ -124,7 +124,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
        struct ptp_clock_info *ops = ptp->info;
        struct ptp_clock_time *pct;
-       struct timespec ts;
+       struct timespec64 ts;
        int enable, err = 0;
        unsigned int i, pin_index;
 
@@ -197,16 +197,16 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                }
                pct = &sysoff->ts[0];
                for (i = 0; i < sysoff->n_samples; i++) {
-                       getnstimeofday(&ts);
+                       getnstimeofday64(&ts);
                        pct->sec = ts.tv_sec;
                        pct->nsec = ts.tv_nsec;
                        pct++;
-                       ptp->info->gettime(ptp->info, &ts);
+                       ptp->info->gettime64(ptp->info, &ts);
                        pct->sec = ts.tv_sec;
                        pct->nsec = ts.tv_nsec;
                        pct++;
                }
-               getnstimeofday(&ts);
+               getnstimeofday64(&ts);
                pct->sec = ts.tv_sec;
                pct->nsec = ts.tv_nsec;
                if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
index 296b0ec8744da915763f8444c2ae8e902376c33e..2e481b9e8ea597858e08ab8933374fce8ccd197c 100644 (file)
@@ -107,13 +107,21 @@ static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
 static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
 {
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
-       return ptp->info->settime(ptp->info, tp);
+       struct timespec64 ts = timespec_to_timespec64(*tp);
+
+       return  ptp->info->settime64(ptp->info, &ts);
 }
 
 static int ptp_clock_gettime(struct posix_clock *pc, struct timespec *tp)
 {
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
-       return ptp->info->gettime(ptp->info, tp);
+       struct timespec64 ts;
+       int err;
+
+       err = ptp->info->gettime64(ptp->info, &ts);
+       if (!err)
+               *tp = timespec64_to_timespec(ts);
+       return err;
 }
 
 static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
index 604d340f20956bc1d0df55cc692d76d0f313df02..934c139916c609e3647acd5c6229a781ca348d56 100644 (file)
@@ -175,7 +175,7 @@ static int ptp_ixp_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return 0;
 }
 
-static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        u64 ns;
        u32 remainder;
@@ -195,7 +195,7 @@ static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 }
 
 static int ptp_ixp_settime(struct ptp_clock_info *ptp,
-                          const struct timespec *ts)
+                          const struct timespec64 *ts)
 {
        u64 ns;
        unsigned long flags;
@@ -248,8 +248,8 @@ static struct ptp_clock_info ptp_ixp_caps = {
        .pps            = 0,
        .adjfreq        = ptp_ixp_adjfreq,
        .adjtime        = ptp_ixp_adjtime,
-       .gettime        = ptp_ixp_gettime,
-       .settime        = ptp_ixp_settime,
+       .gettime64      = ptp_ixp_gettime,
+       .settime64      = ptp_ixp_settime,
        .enable         = ptp_ixp_enable,
 };
 
index 255487272859d719c4e0be05735cb0475df99afe..3aa22ae4d94c051fb979c2b9ca6610db4c3c249c 100644 (file)
@@ -449,7 +449,7 @@ static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return 0;
 }
 
-static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        u64 ns;
        u32 remainder;
@@ -467,7 +467,7 @@ static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 }
 
 static int ptp_pch_settime(struct ptp_clock_info *ptp,
-                          const struct timespec *ts)
+                          const struct timespec64 *ts)
 {
        u64 ns;
        unsigned long flags;
@@ -518,8 +518,8 @@ static struct ptp_clock_info ptp_pch_caps = {
        .pps            = 0,
        .adjfreq        = ptp_pch_adjfreq,
        .adjtime        = ptp_pch_adjtime,
-       .gettime        = ptp_pch_gettime,
-       .settime        = ptp_pch_settime,
+       .gettime64      = ptp_pch_gettime,
+       .settime64      = ptp_pch_settime,
        .enable         = ptp_pch_enable,
 };
 
index f1b5111bbaba42a690ae573ce7a699ab60ebcaaa..b2837b1c70b750c229c9c42265dd232cedfa73e8 100644 (file)
@@ -57,17 +57,6 @@ config SMSGIUCV_EVENT
 
          To compile as a module, choose M. The module name is "smsgiucv_app".
 
-config CLAW
-       def_tristate m
-       prompt "CLAW device support"
-       depends on CCW && NETDEVICES
-       help
-         This driver supports channel attached CLAW devices.
-         CLAW is Common Link Access for Workstation.  Common devices
-          that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
-         To compile as a module, choose M. The module name is claw.
-         To compile into the kernel, choose Y.
-
 config QETH
        def_tristate y
        prompt "Gigabit Ethernet device support"
@@ -106,6 +95,6 @@ config QETH_IPV6
 
 config CCWGROUP
        tristate
-       default (LCS || CTCM || QETH || CLAW)
+       default (LCS || CTCM || QETH)
 
 endmenu
index d28f05d0c75addfd7e388d443c8828b0c8c666be..c351b07603e054bf72e9802f525d48f03f42cd06 100644 (file)
@@ -8,7 +8,6 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
 obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
 obj-$(CONFIG_LCS) += lcs.o
-obj-$(CONFIG_CLAW) += claw.o
 qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
 obj-$(CONFIG_QETH) += qeth.o
 qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
deleted file mode 100644 (file)
index d609ca0..0000000
+++ /dev/null
@@ -1,3377 +0,0 @@
-/*
- *    ESCON CLAW network driver
- *
- *  Linux for zSeries version
- *    Copyright IBM Corp. 2002, 2009
- *  Author(s) Original code written by:
- *             Kazuo Iimura <iimura@jp.ibm.com>
- *           Rewritten by
- *             Andy Richter <richtera@us.ibm.com>
- *             Marc Price <mwprice@us.ibm.com>
- *
- *    sysfs parms:
- *   group x.x.rrrr,x.x.wwww
- *   read_buffer nnnnnnn
- *   write_buffer nnnnnn
- *   host_name  aaaaaaaa
- *   adapter_name aaaaaaaa
- *   api_type    aaaaaaaa
- *
- *  eg.
- *   group  0.0.0200 0.0.0201
- *   read_buffer 25
- *   write_buffer 20
- *   host_name LINUX390
- *   adapter_name RS6K
- *   api_type     TCPIP
- *
- *  where
- *
- *   The device id is decided by the order entries
- *   are added to the group the first is claw0 the second claw1
- *   up to CLAW_MAX_DEV
- *
- *   rrrr     -        the first of 2 consecutive device addresses used for the
- *             CLAW protocol.
- *             The specified address is always used as the input (Read)
- *             channel and the next address is used as the output channel.
- *
- *   wwww     -        the second of 2 consecutive device addresses used for
- *             the CLAW protocol.
- *              The specified address is always used as the output
- *             channel and the previous address is used as the input channel.
- *
- *   read_buffer       -       specifies number of input buffers to allocate.
- *   write_buffer       -       specifies number of output buffers to allocate.
- *   host_name          -       host name
- *   adaptor_name       -       adaptor name
- *   api_type           -       API type TCPIP or API will be sent and expected
- *                             as ws_name
- *
- *   Note the following requirements:
- *   1)  host_name must match the configured adapter_name on the remote side
- *   2)  adaptor_name must match the configured host name on the remote side
- *
- *  Change History
- *    1.00  Initial release shipped
- *    1.10  Changes for Buffer allocation
- *    1.15  Changed for 2.6 Kernel  No longer compiles on 2.4 or lower
- *    1.25  Added Packing support
- *    1.5
- */
-
-#define KMSG_COMPONENT "claw"
-
-#include <asm/ccwdev.h>
-#include <asm/ccwgroup.h>
-#include <asm/debug.h>
-#include <asm/idals.h>
-#include <asm/io.h>
-#include <linux/bitops.h>
-#include <linux/ctype.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/proc_fs.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/tcp.h>
-#include <linux/timer.h>
-#include <linux/types.h>
-
-#include "claw.h"
-
-/*
-   CLAW uses the s390dbf file system  see claw_trace and claw_setup
-*/
-
-static char version[] __initdata = "CLAW driver";
-static char debug_buffer[255];
-/**
- * Debug Facility Stuff
- */
-static debug_info_t *claw_dbf_setup;
-static debug_info_t *claw_dbf_trace;
-
-/**
- *  CLAW Debug Facility functions
- */
-static void
-claw_unregister_debug_facility(void)
-{
-       debug_unregister(claw_dbf_setup);
-       debug_unregister(claw_dbf_trace);
-}
-
-static int
-claw_register_debug_facility(void)
-{
-       claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
-       claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
-       if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
-               claw_unregister_debug_facility();
-               return -ENOMEM;
-       }
-       debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
-       debug_set_level(claw_dbf_setup, 2);
-       debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
-       debug_set_level(claw_dbf_trace, 2);
-       return 0;
-}
-
-static inline void
-claw_set_busy(struct net_device *dev)
-{
- ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
-}
-
-static inline void
-claw_clear_busy(struct net_device *dev)
-{
-       clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
-       netif_wake_queue(dev);
-}
-
-static inline int
-claw_check_busy(struct net_device *dev)
-{
-       return ((struct claw_privbk *) dev->ml_priv)->tbusy;
-}
-
-static inline void
-claw_setbit_busy(int nr,struct net_device *dev)
-{
-       netif_stop_queue(dev);
-       set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
-}
-
-static inline void
-claw_clearbit_busy(int nr,struct net_device *dev)
-{
-       clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
-       netif_wake_queue(dev);
-}
-
-static inline int
-claw_test_and_setbit_busy(int nr,struct net_device *dev)
-{
-       netif_stop_queue(dev);
-       return test_and_set_bit(nr,
-               (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
-}
-
-
-/* Functions for the DEV methods */
-
-static int claw_probe(struct ccwgroup_device *cgdev);
-static void claw_remove_device(struct ccwgroup_device *cgdev);
-static void claw_purge_skb_queue(struct sk_buff_head *q);
-static int claw_new_device(struct ccwgroup_device *cgdev);
-static int claw_shutdown_device(struct ccwgroup_device *cgdev);
-static int claw_tx(struct sk_buff *skb, struct net_device *dev);
-static int claw_change_mtu( struct net_device *dev, int new_mtu);
-static int claw_open(struct net_device *dev);
-static void claw_irq_handler(struct ccw_device *cdev,
-       unsigned long intparm, struct irb *irb);
-static void claw_irq_tasklet ( unsigned long data );
-static int claw_release(struct net_device *dev);
-static void claw_write_retry ( struct chbk * p_ch );
-static void claw_write_next ( struct chbk * p_ch );
-static void claw_timer ( struct chbk * p_ch );
-
-/* Functions */
-static int add_claw_reads(struct net_device *dev,
-       struct ccwbk* p_first, struct ccwbk* p_last);
-static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
-static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
-static int find_link(struct net_device *dev, char *host_name, char *ws_name );
-static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
-static int init_ccw_bk(struct net_device *dev);
-static void probe_error( struct ccwgroup_device *cgdev);
-static struct net_device_stats *claw_stats(struct net_device *dev);
-static int pages_to_order_of_mag(int num_of_pages);
-static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
-/* sysfs Functions */
-static ssize_t claw_hname_show(struct device *dev,
-       struct device_attribute *attr, char *buf);
-static ssize_t claw_hname_write(struct device *dev,
-       struct device_attribute *attr,
-       const char *buf, size_t count);
-static ssize_t claw_adname_show(struct device *dev,
-       struct device_attribute *attr, char *buf);
-static ssize_t claw_adname_write(struct device *dev,
-       struct device_attribute *attr,
-       const char *buf, size_t count);
-static ssize_t claw_apname_show(struct device *dev,
-       struct device_attribute *attr, char *buf);
-static ssize_t claw_apname_write(struct device *dev,
-       struct device_attribute *attr,
-       const char *buf, size_t count);
-static ssize_t claw_wbuff_show(struct device *dev,
-       struct device_attribute *attr, char *buf);
-static ssize_t claw_wbuff_write(struct device *dev,
-       struct device_attribute *attr,
-       const char *buf, size_t count);
-static ssize_t claw_rbuff_show(struct device *dev,
-       struct device_attribute *attr, char *buf);
-static ssize_t claw_rbuff_write(struct device *dev,
-       struct device_attribute *attr,
-       const char *buf, size_t count);
-
-/*   Functions for System Validate  */
-static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
-static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
-       __u8 correlator, __u8 rc , char *local_name, char *remote_name);
-static int claw_snd_conn_req(struct net_device *dev, __u8 link);
-static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
-static int claw_snd_sys_validate_rsp(struct net_device *dev,
-        struct clawctl * p_ctl, __u32 return_code);
-static int claw_strt_conn_req(struct net_device *dev );
-static void claw_strt_read(struct net_device *dev, int lock);
-static void claw_strt_out_IO(struct net_device *dev);
-static void claw_free_wrt_buf(struct net_device *dev);
-
-/* Functions for unpack reads   */
-static void unpack_read(struct net_device *dev);
-
-static int claw_pm_prepare(struct ccwgroup_device *gdev)
-{
-       return -EPERM;
-}
-
-/* the root device for claw group devices */
-static struct device *claw_root_dev;
-
-/* ccwgroup table  */
-
-static struct ccwgroup_driver claw_group_driver = {
-       .driver = {
-               .owner  = THIS_MODULE,
-               .name   = "claw",
-       },
-       .setup       = claw_probe,
-       .remove      = claw_remove_device,
-       .set_online  = claw_new_device,
-       .set_offline = claw_shutdown_device,
-       .prepare     = claw_pm_prepare,
-};
-
-static struct ccw_device_id claw_ids[] = {
-       {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
-       {},
-};
-MODULE_DEVICE_TABLE(ccw, claw_ids);
-
-static struct ccw_driver claw_ccw_driver = {
-       .driver = {
-               .owner  = THIS_MODULE,
-               .name   = "claw",
-       },
-       .ids    = claw_ids,
-       .probe  = ccwgroup_probe_ccwdev,
-       .remove = ccwgroup_remove_ccwdev,
-       .int_class = IRQIO_CLW,
-};
-
-static ssize_t claw_driver_group_store(struct device_driver *ddrv,
-                                      const char *buf, size_t count)
-{
-       int err;
-       err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf);
-       return err ? err : count;
-}
-static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
-
-static struct attribute *claw_drv_attrs[] = {
-       &driver_attr_group.attr,
-       NULL,
-};
-static struct attribute_group claw_drv_attr_group = {
-       .attrs = claw_drv_attrs,
-};
-static const struct attribute_group *claw_drv_attr_groups[] = {
-       &claw_drv_attr_group,
-       NULL,
-};
-
-/*
-*       Key functions
-*/
-
-/*-------------------------------------------------------------------*
- *   claw_tx                                                         *
- *-------------------------------------------------------------------*/
-
-static int
-claw_tx(struct sk_buff *skb, struct net_device *dev)
-{
-        int             rc;
-       struct claw_privbk *privptr = dev->ml_priv;
-       unsigned long saveflags;
-        struct chbk *p_ch;
-
-       CLAW_DBF_TEXT(4, trace, "claw_tx");
-       p_ch = &privptr->channel[WRITE_CHANNEL];
-        spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
-        rc=claw_hw_tx( skb, dev, 1 );
-        spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
-       CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
-       if (rc)
-               rc = NETDEV_TX_BUSY;
-       else
-               rc = NETDEV_TX_OK;
-        return rc;
-}   /*  end of claw_tx */
-
-/*------------------------------------------------------------------*
- *  pack the collect queue into an skb and return it                *
- *   If not packing just return the top skb from the queue          *
- *------------------------------------------------------------------*/
-
-static struct sk_buff *
-claw_pack_skb(struct claw_privbk *privptr)
-{
-       struct sk_buff *new_skb,*held_skb;
-       struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
-       struct claw_env  *p_env = privptr->p_env;
-       int     pkt_cnt,pk_ind,so_far;
-
-       new_skb = NULL;         /* assume no dice */
-       pkt_cnt = 0;
-       CLAW_DBF_TEXT(4, trace, "PackSKBe");
-       if (!skb_queue_empty(&p_ch->collect_queue)) {
-       /* some data */
-               held_skb = skb_dequeue(&p_ch->collect_queue);
-               if (held_skb)
-                       dev_kfree_skb_any(held_skb);
-               else
-                       return NULL;
-               if (p_env->packing != DO_PACKED)
-                       return held_skb;
-               /* get a new SKB we will pack at least one */
-               new_skb = dev_alloc_skb(p_env->write_size);
-               if (new_skb == NULL) {
-                       atomic_inc(&held_skb->users);
-                       skb_queue_head(&p_ch->collect_queue,held_skb);
-                       return NULL;
-               }
-               /* we have packed packet and a place to put it  */
-               pk_ind = 1;
-               so_far = 0;
-               new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
-               while ((pk_ind) && (held_skb != NULL)) {
-                       if (held_skb->len+so_far <= p_env->write_size-8) {
-                               memcpy(skb_put(new_skb,held_skb->len),
-                                       held_skb->data,held_skb->len);
-                               privptr->stats.tx_packets++;
-                               so_far += held_skb->len;
-                               pkt_cnt++;
-                               dev_kfree_skb_any(held_skb);
-                               held_skb = skb_dequeue(&p_ch->collect_queue);
-                               if (held_skb)
-                                       atomic_dec(&held_skb->users);
-                       } else {
-                               pk_ind = 0;
-                               atomic_inc(&held_skb->users);
-                               skb_queue_head(&p_ch->collect_queue,held_skb);
-                       }
-               }
-       }
-       CLAW_DBF_TEXT(4, trace, "PackSKBx");
-       return new_skb;
-}
-
-/*-------------------------------------------------------------------*
- *   claw_change_mtu                                                 *
- *                                                                   *
- *-------------------------------------------------------------------*/
-
-static int
-claw_change_mtu(struct net_device *dev, int new_mtu)
-{
-       struct claw_privbk *privptr = dev->ml_priv;
-       int buff_size;
-       CLAW_DBF_TEXT(4, trace, "setmtu");
-       buff_size = privptr->p_env->write_size;
-        if ((new_mtu < 60) || (new_mtu > buff_size)) {
-                return -EINVAL;
-        }
-        dev->mtu = new_mtu;
-        return 0;
-}  /*   end of claw_change_mtu */
-
-
-/*-------------------------------------------------------------------*
- *   claw_open                                                       *
- *                                                                   *
- *-------------------------------------------------------------------*/
-static int
-claw_open(struct net_device *dev)
-{
-
-        int     rc;
-        int     i;
-        unsigned long       saveflags=0;
-        unsigned long       parm;
-        struct claw_privbk  *privptr;
-       DECLARE_WAITQUEUE(wait, current);
-        struct timer_list  timer;
-        struct ccwbk *p_buf;
-
-       CLAW_DBF_TEXT(4, trace, "open");
-       privptr = (struct claw_privbk *)dev->ml_priv;
-        /*   allocate and initialize CCW blocks */
-       if (privptr->buffs_alloc == 0) {
-               rc=init_ccw_bk(dev);
-               if (rc) {
-                       CLAW_DBF_TEXT(2, trace, "openmem");
-                       return -ENOMEM;
-               }
-       }
-        privptr->system_validate_comp=0;
-        privptr->release_pend=0;
-       if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
-               privptr->p_env->read_size=DEF_PACK_BUFSIZE;
-               privptr->p_env->write_size=DEF_PACK_BUFSIZE;
-               privptr->p_env->packing=PACKING_ASK;
-       } else {
-               privptr->p_env->packing=0;
-               privptr->p_env->read_size=CLAW_FRAME_SIZE;
-               privptr->p_env->write_size=CLAW_FRAME_SIZE;
-       }
-        claw_set_busy(dev);
-       tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
-               (unsigned long) &privptr->channel[READ_CHANNEL]);
-        for ( i = 0; i < 2;  i++) {
-               CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
-                init_waitqueue_head(&privptr->channel[i].wait);
-               /* skb_queue_head_init(&p_ch->io_queue); */
-               if (i == WRITE_CHANNEL)
-                       skb_queue_head_init(
-                               &privptr->channel[WRITE_CHANNEL].collect_queue);
-                privptr->channel[i].flag_a = 0;
-                privptr->channel[i].IO_active = 0;
-                privptr->channel[i].flag  &= ~CLAW_TIMER;
-                init_timer(&timer);
-                timer.function = (void *)claw_timer;
-                timer.data = (unsigned long)(&privptr->channel[i]);
-                timer.expires = jiffies + 15*HZ;
-                add_timer(&timer);
-                spin_lock_irqsave(get_ccwdev_lock(
-                       privptr->channel[i].cdev), saveflags);
-                parm = (unsigned long) &privptr->channel[i];
-                privptr->channel[i].claw_state = CLAW_START_HALT_IO;
-               rc = 0;
-               add_wait_queue(&privptr->channel[i].wait, &wait);
-                rc = ccw_device_halt(
-                       (struct ccw_device *)privptr->channel[i].cdev,parm);
-                set_current_state(TASK_INTERRUPTIBLE);
-                spin_unlock_irqrestore(
-                       get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
-                schedule();
-                remove_wait_queue(&privptr->channel[i].wait, &wait);
-                if(rc != 0)
-                        ccw_check_return_code(privptr->channel[i].cdev, rc);
-                if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
-                        del_timer(&timer);
-        }
-       if ((((privptr->channel[READ_CHANNEL].last_dstat |
-               privptr->channel[WRITE_CHANNEL].last_dstat) &
-           ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
-          (((privptr->channel[READ_CHANNEL].flag |
-               privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
-               dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
-                       "%s: remote side is not ready\n", dev->name);
-               CLAW_DBF_TEXT(2, trace, "notrdy");
-
-                for ( i = 0; i < 2;  i++) {
-                        spin_lock_irqsave(
-                               get_ccwdev_lock(privptr->channel[i].cdev),
-                               saveflags);
-                        parm = (unsigned long) &privptr->channel[i];
-                        privptr->channel[i].claw_state = CLAW_STOP;
-                        rc = ccw_device_halt(
-                               (struct ccw_device *)&privptr->channel[i].cdev,
-                               parm);
-                        spin_unlock_irqrestore(
-                               get_ccwdev_lock(privptr->channel[i].cdev),
-                               saveflags);
-                        if (rc != 0) {
-                                ccw_check_return_code(
-                                       privptr->channel[i].cdev, rc);
-                        }
-                }
-                free_pages((unsigned long)privptr->p_buff_ccw,
-                       (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
-                if (privptr->p_env->read_size < PAGE_SIZE) {
-                        free_pages((unsigned long)privptr->p_buff_read,
-                              (int)pages_to_order_of_mag(
-                                       privptr->p_buff_read_num));
-                }
-                else {
-                        p_buf=privptr->p_read_active_first;
-                        while (p_buf!=NULL) {
-                                free_pages((unsigned long)p_buf->p_buffer,
-                                     (int)pages_to_order_of_mag(
-                                       privptr->p_buff_pages_perread ));
-                                p_buf=p_buf->next;
-                        }
-                }
-                if (privptr->p_env->write_size < PAGE_SIZE ) {
-                        free_pages((unsigned long)privptr->p_buff_write,
-                            (int)pages_to_order_of_mag(
-                               privptr->p_buff_write_num));
-                }
-                else {
-                        p_buf=privptr->p_write_active_first;
-                        while (p_buf!=NULL) {
-                                free_pages((unsigned long)p_buf->p_buffer,
-                                    (int)pages_to_order_of_mag(
-                                       privptr->p_buff_pages_perwrite ));
-                                p_buf=p_buf->next;
-                        }
-                }
-               privptr->buffs_alloc = 0;
-               privptr->channel[READ_CHANNEL].flag = 0x00;
-               privptr->channel[WRITE_CHANNEL].flag = 0x00;
-                privptr->p_buff_ccw=NULL;
-                privptr->p_buff_read=NULL;
-                privptr->p_buff_write=NULL;
-                claw_clear_busy(dev);
-               CLAW_DBF_TEXT(2, trace, "open EIO");
-                return -EIO;
-        }
-
-        /*   Send SystemValidate command */
-
-        claw_clear_busy(dev);
-       CLAW_DBF_TEXT(4, trace, "openok");
-        return 0;
-}    /*     end of claw_open    */
-
-/*-------------------------------------------------------------------*
-*                                                                    *
-*       claw_irq_handler                                             *
-*                                                                    *
-*--------------------------------------------------------------------*/
-static void
-claw_irq_handler(struct ccw_device *cdev,
-       unsigned long intparm, struct irb *irb)
-{
-        struct chbk *p_ch = NULL;
-        struct claw_privbk *privptr = NULL;
-        struct net_device *dev = NULL;
-        struct claw_env  *p_env;
-        struct chbk *p_ch_r=NULL;
-
-       CLAW_DBF_TEXT(4, trace, "clawirq");
-        /* Bypass all 'unsolicited interrupts' */
-       privptr = dev_get_drvdata(&cdev->dev);
-       if (!privptr) {
-               dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
-                       " IRQ, c-%02x d-%02x\n",
-                       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
-               CLAW_DBF_TEXT(2, trace, "badirq");
-                return;
-        }
-
-       /* Try to extract channel from driver data. */
-       if (privptr->channel[READ_CHANNEL].cdev == cdev)
-               p_ch = &privptr->channel[READ_CHANNEL];
-       else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
-               p_ch = &privptr->channel[WRITE_CHANNEL];
-       else {
-               dev_warn(&cdev->dev, "The device is not a CLAW device\n");
-               CLAW_DBF_TEXT(2, trace, "badchan");
-               return;
-       }
-       CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);
-
-       dev = (struct net_device *) (p_ch->ndev);
-        p_env=privptr->p_env;
-
-       /* Copy interruption response block. */
-       memcpy(p_ch->irb, irb, sizeof(struct irb));
-
-       /* Check for good subchannel return code, otherwise info message */
-       if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
-               dev_info(&cdev->dev,
-                       "%s: subchannel check for device: %04x -"
-                       " Sch Stat %02x  Dev Stat %02x CPA - %04x\n",
-                        dev->name, p_ch->devno,
-                       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
-                       irb->scsw.cmd.cpa);
-               CLAW_DBF_TEXT(2, trace, "chanchk");
-                /* return; */
-        }
-
-        /* Check the reason-code of a unit check */
-       if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
-                ccw_check_unit_check(p_ch, irb->ecw[0]);
-
-        /* State machine to bring the connection up, down and to restart */
-       p_ch->last_dstat = irb->scsw.cmd.dstat;
-
-        switch (p_ch->claw_state) {
-       case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
-               if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
-               (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
-               (p_ch->irb->scsw.cmd.stctl ==
-               (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
-                       return;
-               wake_up(&p_ch->wait);   /* wake up claw_release */
-               CLAW_DBF_TEXT(4, trace, "stop");
-               return;
-       case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open  */
-               if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
-               (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
-               (p_ch->irb->scsw.cmd.stctl ==
-               (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
-                       CLAW_DBF_TEXT(4, trace, "haltio");
-                       return;
-               }
-               if (p_ch->flag == CLAW_READ) {
-                       p_ch->claw_state = CLAW_START_READ;
-                       wake_up(&p_ch->wait); /* wake claw_open (READ)*/
-               } else if (p_ch->flag == CLAW_WRITE) {
-                       p_ch->claw_state = CLAW_START_WRITE;
-                       /*      send SYSTEM_VALIDATE                    */
-                       claw_strt_read(dev, LOCK_NO);
-                       claw_send_control(dev,
-                               SYSTEM_VALIDATE_REQUEST,
-                               0, 0, 0,
-                               p_env->host_name,
-                               p_env->adapter_name);
-               } else {
-                       dev_warn(&cdev->dev, "The CLAW device received"
-                               " an unexpected IRQ, "
-                               "c-%02x d-%02x\n",
-                               irb->scsw.cmd.cstat,
-                               irb->scsw.cmd.dstat);
-                       return;
-                       }
-               CLAW_DBF_TEXT(4, trace, "haltio");
-               return;
-       case CLAW_START_READ:
-               CLAW_DBF_TEXT(4, trace, "ReadIRQ");
-               if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
-                       clear_bit(0, (void *)&p_ch->IO_active);
-                       if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
-                           (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
-                           (p_ch->irb->ecw[0])        == 0) {
-                               privptr->stats.rx_errors++;
-                               dev_info(&cdev->dev,
-                                       "%s: Restart is required after remote "
-                                       "side recovers \n",
-                                       dev->name);
-                       }
-                       CLAW_DBF_TEXT(4, trace, "notrdy");
-                       return;
-               }
-               if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
-                       (p_ch->irb->scsw.cmd.dstat == 0)) {
-                       if (test_and_set_bit(CLAW_BH_ACTIVE,
-                               (void *)&p_ch->flag_a) == 0)
-                               tasklet_schedule(&p_ch->tasklet);
-                       else
-                               CLAW_DBF_TEXT(4, trace, "PCINoBH");
-                       CLAW_DBF_TEXT(4, trace, "PCI_read");
-                       return;
-               }
-               if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
-                (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
-                (p_ch->irb->scsw.cmd.stctl ==
-                (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
-                       CLAW_DBF_TEXT(4, trace, "SPend_rd");
-                       return;
-               }
-               clear_bit(0, (void *)&p_ch->IO_active);
-               claw_clearbit_busy(TB_RETRY, dev);
-               if (test_and_set_bit(CLAW_BH_ACTIVE,
-                       (void *)&p_ch->flag_a) == 0)
-                       tasklet_schedule(&p_ch->tasklet);
-               else
-                       CLAW_DBF_TEXT(4, trace, "RdBHAct");
-               CLAW_DBF_TEXT(4, trace, "RdIRQXit");
-               return;
-       case CLAW_START_WRITE:
-               if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
-                       dev_info(&cdev->dev,
-                               "%s: Unit Check Occurred in "
-                               "write channel\n", dev->name);
-                       clear_bit(0, (void *)&p_ch->IO_active);
-                       if (p_ch->irb->ecw[0] & 0x80) {
-                               dev_info(&cdev->dev,
-                                       "%s: Resetting Event "
-                                       "occurred:\n", dev->name);
-                               init_timer(&p_ch->timer);
-                               p_ch->timer.function =
-                                       (void *)claw_write_retry;
-                               p_ch->timer.data = (unsigned long)p_ch;
-                               p_ch->timer.expires = jiffies + 10*HZ;
-                               add_timer(&p_ch->timer);
-                               dev_info(&cdev->dev,
-                                       "%s: write connection "
-                                       "restarting\n", dev->name);
-                       }
-                       CLAW_DBF_TEXT(4, trace, "rstrtwrt");
-                       return;
-               }
-               if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
-                       clear_bit(0, (void *)&p_ch->IO_active);
-                       dev_info(&cdev->dev,
-                               "%s: Unit Exception "
-                               "occurred in write channel\n",
-                               dev->name);
-               }
-               if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
-               (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
-               (p_ch->irb->scsw.cmd.stctl ==
-               (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
-                       CLAW_DBF_TEXT(4, trace, "writeUE");
-                       return;
-               }
-               clear_bit(0, (void *)&p_ch->IO_active);
-               if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
-                       claw_write_next(p_ch);
-                       claw_clearbit_busy(TB_TX, dev);
-                       claw_clear_busy(dev);
-               }
-               p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
-               if (test_and_set_bit(CLAW_BH_ACTIVE,
-                       (void *)&p_ch_r->flag_a) == 0)
-                       tasklet_schedule(&p_ch_r->tasklet);
-               CLAW_DBF_TEXT(4, trace, "StWtExit");
-               return;
-       default:
-               dev_warn(&cdev->dev,
-                       "The CLAW device for %s received an unexpected IRQ\n",
-                        dev->name);
-               CLAW_DBF_TEXT(2, trace, "badIRQ");
-               return;
-        }
-
-}       /*   end of claw_irq_handler    */
-
-
-/*-------------------------------------------------------------------*
-*       claw_irq_tasklet                                             *
-*                                                                    *
-*--------------------------------------------------------------------*/
-static void
-claw_irq_tasklet ( unsigned long data )
-{
-       struct chbk * p_ch;
-        struct net_device  *dev;
-
-       p_ch = (struct chbk *) data;
-        dev = (struct net_device *)p_ch->ndev;
-       CLAW_DBF_TEXT(4, trace, "IRQtask");
-        unpack_read(dev);
-        clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
-       CLAW_DBF_TEXT(4, trace, "TskletXt");
-        return;
-}       /*    end of claw_irq_bh    */
-
-/*-------------------------------------------------------------------*
-*       claw_release                                                 *
-*                                                                    *
-*--------------------------------------------------------------------*/
-static int
-claw_release(struct net_device *dev)
-{
-        int                rc;
-        int                i;
-        unsigned long      saveflags;
-        unsigned long      parm;
-        struct claw_privbk *privptr;
-        DECLARE_WAITQUEUE(wait, current);
-        struct ccwbk*             p_this_ccw;
-        struct ccwbk*             p_buf;
-
-       if (!dev)
-                return 0;
-       privptr = (struct claw_privbk *)dev->ml_priv;
-        if (!privptr)
-                return 0;
-       CLAW_DBF_TEXT(4, trace, "release");
-        privptr->release_pend=1;
-        claw_setbit_busy(TB_STOP,dev);
-        for ( i = 1; i >=0 ;  i--) {
-                spin_lock_irqsave(
-                       get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
-            /*   del_timer(&privptr->channel[READ_CHANNEL].timer);  */
-               privptr->channel[i].claw_state = CLAW_STOP;
-                privptr->channel[i].IO_active = 0;
-                parm = (unsigned long) &privptr->channel[i];
-               if (i == WRITE_CHANNEL)
-                       claw_purge_skb_queue(
-                               &privptr->channel[WRITE_CHANNEL].collect_queue);
-                rc = ccw_device_halt (privptr->channel[i].cdev, parm);
-               if (privptr->system_validate_comp==0x00)  /* never opened? */
-                   init_waitqueue_head(&privptr->channel[i].wait);
-                add_wait_queue(&privptr->channel[i].wait, &wait);
-                set_current_state(TASK_INTERRUPTIBLE);
-               spin_unlock_irqrestore(
-                       get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
-               schedule();
-               remove_wait_queue(&privptr->channel[i].wait, &wait);
-               if (rc != 0) {
-                        ccw_check_return_code(privptr->channel[i].cdev, rc);
-                }
-        }
-       if (privptr->pk_skb != NULL) {
-               dev_kfree_skb_any(privptr->pk_skb);
-               privptr->pk_skb = NULL;
-       }
-       if(privptr->buffs_alloc != 1) {
-               CLAW_DBF_TEXT(4, trace, "none2fre");
-               return 0;
-       }
-       CLAW_DBF_TEXT(4, trace, "freebufs");
-       if (privptr->p_buff_ccw != NULL) {
-               free_pages((unsigned long)privptr->p_buff_ccw,
-                       (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
-       }
-       CLAW_DBF_TEXT(4, trace, "freeread");
-        if (privptr->p_env->read_size < PAGE_SIZE) {
-           if (privptr->p_buff_read != NULL) {
-                free_pages((unsigned long)privptr->p_buff_read,
-                     (int)pages_to_order_of_mag(privptr->p_buff_read_num));
-               }
-        }
-        else {
-                p_buf=privptr->p_read_active_first;
-                while (p_buf!=NULL) {
-                        free_pages((unsigned long)p_buf->p_buffer,
-                            (int)pages_to_order_of_mag(
-                               privptr->p_buff_pages_perread ));
-                        p_buf=p_buf->next;
-                }
-        }
-        CLAW_DBF_TEXT(4, trace, "freewrit");
-        if (privptr->p_env->write_size < PAGE_SIZE ) {
-                free_pages((unsigned long)privptr->p_buff_write,
-                     (int)pages_to_order_of_mag(privptr->p_buff_write_num));
-        }
-        else {
-                p_buf=privptr->p_write_active_first;
-                while (p_buf!=NULL) {
-                        free_pages((unsigned long)p_buf->p_buffer,
-                             (int)pages_to_order_of_mag(
-                             privptr->p_buff_pages_perwrite ));
-                        p_buf=p_buf->next;
-                }
-        }
-        CLAW_DBF_TEXT(4, trace, "clearptr");
-       privptr->buffs_alloc = 0;
-        privptr->p_buff_ccw=NULL;
-        privptr->p_buff_read=NULL;
-        privptr->p_buff_write=NULL;
-        privptr->system_validate_comp=0;
-        privptr->release_pend=0;
-        /*      Remove any writes that were pending and reset all reads   */
-        p_this_ccw=privptr->p_read_active_first;
-        while (p_this_ccw!=NULL) {
-                p_this_ccw->header.length=0xffff;
-                p_this_ccw->header.opcode=0xff;
-                p_this_ccw->header.flag=0x00;
-                p_this_ccw=p_this_ccw->next;
-        }
-
-        while (privptr->p_write_active_first!=NULL) {
-                p_this_ccw=privptr->p_write_active_first;
-                p_this_ccw->header.flag=CLAW_PENDING;
-                privptr->p_write_active_first=p_this_ccw->next;
-                p_this_ccw->next=privptr->p_write_free_chain;
-                privptr->p_write_free_chain=p_this_ccw;
-                ++privptr->write_free_count;
-        }
-        privptr->p_write_active_last=NULL;
-        privptr->mtc_logical_link = -1;
-        privptr->mtc_skipping = 1;
-        privptr->mtc_offset=0;
-
-       if (((privptr->channel[READ_CHANNEL].last_dstat |
-               privptr->channel[WRITE_CHANNEL].last_dstat) &
-               ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
-               dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
-                       "Deactivating %s completed with incorrect"
-                       " subchannel status "
-                       "(read %02x, write %02x)\n",
-                dev->name,
-               privptr->channel[READ_CHANNEL].last_dstat,
-               privptr->channel[WRITE_CHANNEL].last_dstat);
-                CLAW_DBF_TEXT(2, trace, "badclose");
-        }
-       CLAW_DBF_TEXT(4, trace, "rlsexit");
-        return 0;
-}      /* end of claw_release     */
-
-/*-------------------------------------------------------------------*
-*       claw_write_retry                                             *
-*                                                                    *
-*--------------------------------------------------------------------*/
-
-static void
-claw_write_retry ( struct chbk *p_ch )
-{
-
-        struct net_device  *dev=p_ch->ndev;
-
-       CLAW_DBF_TEXT(4, trace, "w_retry");
-        if (p_ch->claw_state == CLAW_STOP) {
-               return;
-        }
-       claw_strt_out_IO( dev );
-       CLAW_DBF_TEXT(4, trace, "rtry_xit");
-        return;
-}      /* end of claw_write_retry      */
-
-
-/*-------------------------------------------------------------------*
-*       claw_write_next                                              *
-*                                                                    *
-*--------------------------------------------------------------------*/
-
-static void
-claw_write_next ( struct chbk * p_ch )
-{
-
-        struct net_device  *dev;
-        struct claw_privbk *privptr=NULL;
-       struct sk_buff *pk_skb;
-
-       CLAW_DBF_TEXT(4, trace, "claw_wrt");
-        if (p_ch->claw_state == CLAW_STOP)
-                return;
-        dev = (struct net_device *) p_ch->ndev;
-       privptr = (struct claw_privbk *) dev->ml_priv;
-        claw_free_wrt_buf( dev );
-       if ((privptr->write_free_count > 0) &&
-           !skb_queue_empty(&p_ch->collect_queue)) {
-               pk_skb = claw_pack_skb(privptr);
-               while (pk_skb != NULL) {
-                       claw_hw_tx(pk_skb, dev, 1);
-                       if (privptr->write_free_count > 0) {
-                               pk_skb = claw_pack_skb(privptr);
-                       } else
-                               pk_skb = NULL;
-               }
-       }
-        if (privptr->p_write_active_first!=NULL) {
-                claw_strt_out_IO(dev);
-        }
-        return;
-}      /* end of claw_write_next      */
-
-/*-------------------------------------------------------------------*
-*                                                                    *
-*       claw_timer                                                   *
-*--------------------------------------------------------------------*/
-
-static void
-claw_timer ( struct chbk * p_ch )
-{
-       CLAW_DBF_TEXT(4, trace, "timer");
-        p_ch->flag |= CLAW_TIMER;
-        wake_up(&p_ch->wait);
-        return;
-}      /* end of claw_timer  */
-
-/*
-*
-*       functions
-*/
-
-
-/*-------------------------------------------------------------------*
-*                                                                    *
-*     pages_to_order_of_mag                                          *
-*                                                                    *
-*    takes a number of pages from 1 to 512 and returns the           *
-*    log(num_pages)/log(2) get_free_pages() needs a base 2 order     *
-*    of magnitude get_free_pages() has an upper order of 9           *
-*--------------------------------------------------------------------*/
-
-static int
-pages_to_order_of_mag(int num_of_pages)
-{
-       int     order_of_mag=1;         /* assume 2 pages */
-       int     nump;
-
-       CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
-       if (num_of_pages == 1)   {return 0; }  /* magnitude of 0 = 1 page */
-       /* 512 pages = 2Meg on 4k page systems */
-       if (num_of_pages >= 512) {return 9; }
-       /* we have two or more pages order is at least 1 */
-       for (nump=2 ;nump <= 512;nump*=2) {
-         if (num_of_pages <= nump)
-                 break;
-         order_of_mag +=1;
-       }
-       if (order_of_mag > 9) { order_of_mag = 9; }  /* I know it's paranoid */
-       CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
-       return order_of_mag;
-}
-
-/*-------------------------------------------------------------------*
-*                                                                    *
-*     add_claw_reads                                                 *
-*                                                                    *
-*--------------------------------------------------------------------*/
-static int
-add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
-       struct ccwbk* p_last)
-{
-        struct claw_privbk *privptr;
-        struct ccw1  temp_ccw;
-        struct endccw * p_end;
-       CLAW_DBF_TEXT(4, trace, "addreads");
-       privptr = dev->ml_priv;
-        p_end = privptr->p_end_ccw;
-
-        /* first CCW and last CCW contains a new set of read channel programs
-        *       to apend the running channel programs
-        */
-        if ( p_first==NULL) {
-               CLAW_DBF_TEXT(4, trace, "addexit");
-                return 0;
-        }
-
-        /* set up ending CCW sequence for this segment */
-        if (p_end->read1) {
-                p_end->read1=0x00;    /*  second ending CCW is now active */
-                /*      reset ending CCWs and setup TIC CCWs              */
-                p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
-                p_end->read2_nop2.flags  = CCW_FLAG_SLI | CCW_FLAG_SKIP;
-                p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
-                p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
-                p_end->read2_nop2.cda=0;
-                p_end->read2_nop2.count=1;
-        }
-        else {
-                p_end->read1=0x01;  /* first ending CCW is now active */
-                /*      reset ending CCWs and setup TIC CCWs          */
-                p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
-                p_end->read1_nop2.flags  = CCW_FLAG_SLI | CCW_FLAG_SKIP;
-                p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
-                p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
-                p_end->read1_nop2.cda=0;
-                p_end->read1_nop2.count=1;
-        }
-
-        if ( privptr-> p_read_active_first ==NULL ) {
-               privptr->p_read_active_first = p_first;  /*  set new first */
-               privptr->p_read_active_last  = p_last;   /*  set new last  */
-        }
-        else {
-
-                /* set up TIC ccw  */
-                temp_ccw.cda= (__u32)__pa(&p_first->read);
-                temp_ccw.count=0;
-                temp_ccw.flags=0;
-                temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
-
-
-                if (p_end->read1) {
-
-               /* first set of CCW's is chained to the new read              */
-               /* chain, so the second set is chained to the active chain.   */
-               /* Therefore modify the second set to point to the new        */
-               /* read chain set up TIC CCWs                                 */
-               /* make sure we update the CCW so channel doesn't fetch it    */
-               /* when it's only half done                                   */
-                        memcpy( &p_end->read2_nop2, &temp_ccw ,
-                               sizeof(struct ccw1));
-                        privptr->p_read_active_last->r_TIC_1.cda=
-                               (__u32)__pa(&p_first->read);
-                        privptr->p_read_active_last->r_TIC_2.cda=
-                               (__u32)__pa(&p_first->read);
-                }
-                else {
-                        /* make sure we update the CCW so channel doesn't   */
-                       /* fetch it when it is only half done               */
-                        memcpy( &p_end->read1_nop2, &temp_ccw ,
-                               sizeof(struct ccw1));
-                        privptr->p_read_active_last->r_TIC_1.cda=
-                               (__u32)__pa(&p_first->read);
-                        privptr->p_read_active_last->r_TIC_2.cda=
-                               (__u32)__pa(&p_first->read);
-                }
-               /*      chain in new set of blocks                         */
-                privptr->p_read_active_last->next = p_first;
-                privptr->p_read_active_last=p_last;
-        } /* end of if ( privptr-> p_read_active_first ==NULL)  */
-       CLAW_DBF_TEXT(4, trace, "addexit");
-        return 0;
-}    /*     end of add_claw_reads   */
-
-/*-------------------------------------------------------------------*
- *   ccw_check_return_code                                           *
- *                                                                   *
- *-------------------------------------------------------------------*/
-
-static void
-ccw_check_return_code(struct ccw_device *cdev, int return_code)
-{
-       CLAW_DBF_TEXT(4, trace, "ccwret");
-        if (return_code != 0) {
-                switch (return_code) {
-               case -EBUSY: /* BUSY is a transient state no action needed */
-                       break;
-               case -ENODEV:
-                       dev_err(&cdev->dev, "The remote channel adapter is not"
-                               " available\n");
-                       break;
-               case -EINVAL:
-                       dev_err(&cdev->dev,
-                               "The status of the remote channel adapter"
-                               " is not valid\n");
-                       break;
-               default:
-                       dev_err(&cdev->dev, "The common device layer"
-                               " returned error code %d\n",
-                                 return_code);
-               }
-       }
-       CLAW_DBF_TEXT(4, trace, "ccwret");
-}    /*    end of ccw_check_return_code   */
-
-/*-------------------------------------------------------------------*
-*       ccw_check_unit_check                                         *
-*--------------------------------------------------------------------*/
-
-static void
-ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
-{
-       struct net_device *ndev = p_ch->ndev;
-       struct device *dev = &p_ch->cdev->dev;
-
-       CLAW_DBF_TEXT(4, trace, "unitchek");
-       dev_warn(dev, "The communication peer of %s disconnected\n",
-               ndev->name);
-
-       if (sense & 0x40) {
-               if (sense & 0x01) {
-                       dev_warn(dev, "The remote channel adapter for"
-                               " %s has been reset\n",
-                               ndev->name);
-               }
-       } else if (sense & 0x20) {
-               if (sense & 0x04) {
-                       dev_warn(dev, "A data streaming timeout occurred"
-                               " for %s\n",
-                               ndev->name);
-               } else if (sense & 0x10) {
-                       dev_warn(dev, "The remote channel adapter for %s"
-                               " is faulty\n",
-                               ndev->name);
-               } else {
-                       dev_warn(dev, "A data transfer parity error occurred"
-                               " for %s\n",
-                               ndev->name);
-               }
-       } else if (sense & 0x10) {
-               dev_warn(dev, "A read data parity error occurred"
-                       " for %s\n",
-                       ndev->name);
-       }
-
-}   /*    end of ccw_check_unit_check    */
-
-/*-------------------------------------------------------------------*
-*               find_link                                            *
-*--------------------------------------------------------------------*/
-static int
-find_link(struct net_device *dev, char *host_name, char *ws_name )
-{
-       struct claw_privbk *privptr;
-       struct claw_env *p_env;
-       int    rc=0;
-
-       CLAW_DBF_TEXT(2, setup, "findlink");
-       privptr = dev->ml_priv;
-        p_env=privptr->p_env;
-       switch (p_env->packing)
-       {
-               case  PACKING_ASK:
-                       if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
-                           (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
-                            rc = EINVAL;
-                       break;
-               case  DO_PACKED:
-               case  PACK_SEND:
-                       if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
-                           (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
-                               rc = EINVAL;
-                       break;
-               default:
-                       if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
-                           (memcmp(p_env->api_type , ws_name, 8)!=0))
-                               rc = EINVAL;
-                       break;
-       }
-
-       return rc;
-}    /*    end of find_link    */
-
-/*-------------------------------------------------------------------*
- *   claw_hw_tx                                                      *
- *                                                                   *
- *                                                                   *
- *-------------------------------------------------------------------*/
-
-static int
-claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
-{
-        int                             rc=0;
-        struct claw_privbk             *privptr;
-        struct ccwbk           *p_this_ccw;
-        struct ccwbk           *p_first_ccw;
-        struct ccwbk           *p_last_ccw;
-        __u32                           numBuffers;
-        signed long                     len_of_data;
-        unsigned long                   bytesInThisBuffer;
-        unsigned char                   *pDataAddress;
-        struct endccw                   *pEnd;
-        struct ccw1                     tempCCW;
-       struct claw_env                 *p_env;
-       struct clawph                   *pk_head;
-       struct chbk                     *ch;
-
-       CLAW_DBF_TEXT(4, trace, "hw_tx");
-       privptr = (struct claw_privbk *)(dev->ml_priv);
-       p_env =privptr->p_env;
-       claw_free_wrt_buf(dev); /* Clean up free chain if posible */
-        /*  scan the write queue to free any completed write packets   */
-        p_first_ccw=NULL;
-        p_last_ccw=NULL;
-       if ((p_env->packing >= PACK_SEND) &&
-                   (skb->cb[1] != 'P')) {
-               skb_push(skb,sizeof(struct clawph));
-               pk_head=(struct clawph *)skb->data;
-               pk_head->len=skb->len-sizeof(struct clawph);
-               if (pk_head->len%4)  {
-                       pk_head->len+= 4-(pk_head->len%4);
-                       skb_pad(skb,4-(pk_head->len%4));
-                       skb_put(skb,4-(pk_head->len%4));
-               }
-               if (p_env->packing == DO_PACKED)
-                       pk_head->link_num = linkid;
-               else
-                       pk_head->link_num = 0;
-               pk_head->flag = 0x00;
-               skb_pad(skb,4);
-               skb->cb[1] = 'P';
-       }
-        if (linkid == 0) {
-               if (claw_check_busy(dev)) {
-                       if (privptr->write_free_count!=0) {
-                                claw_clear_busy(dev);
-                        }
-                        else {
-                                claw_strt_out_IO(dev );
-                                claw_free_wrt_buf( dev );
-                                if (privptr->write_free_count==0) {
-                                       ch = &privptr->channel[WRITE_CHANNEL];
-                                       atomic_inc(&skb->users);
-                                       skb_queue_tail(&ch->collect_queue, skb);
-                                       goto Done;
-                                }
-                                else {
-                                       claw_clear_busy(dev);
-                                }
-                        }
-                }
-                /*  tx lock  */
-                if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
-                       ch = &privptr->channel[WRITE_CHANNEL];
-                       atomic_inc(&skb->users);
-                       skb_queue_tail(&ch->collect_queue, skb);
-                        claw_strt_out_IO(dev );
-                        rc=-EBUSY;
-                        goto Done2;
-                }
-        }
-        /*      See how many write buffers are required to hold this data */
-       numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
-
-        /*      If that number of buffers isn't available, give up for now */
-        if (privptr->write_free_count < numBuffers ||
-            privptr->p_write_free_chain == NULL ) {
-
-                claw_setbit_busy(TB_NOBUFFER,dev);
-               ch = &privptr->channel[WRITE_CHANNEL];
-               atomic_inc(&skb->users);
-               skb_queue_tail(&ch->collect_queue, skb);
-               CLAW_DBF_TEXT(2, trace, "clawbusy");
-                goto Done2;
-        }
-        pDataAddress=skb->data;
-        len_of_data=skb->len;
-
-        while (len_of_data > 0) {
-                p_this_ccw=privptr->p_write_free_chain;  /* get a block */
-               if (p_this_ccw == NULL) { /* lost the race */
-                       ch = &privptr->channel[WRITE_CHANNEL];
-                       atomic_inc(&skb->users);
-                       skb_queue_tail(&ch->collect_queue, skb);
-                       goto Done2;
-               }
-                privptr->p_write_free_chain=p_this_ccw->next;
-                p_this_ccw->next=NULL;
-                --privptr->write_free_count; /* -1 */
-               if (len_of_data >= privptr->p_env->write_size)
-                       bytesInThisBuffer = privptr->p_env->write_size;
-               else
-                       bytesInThisBuffer = len_of_data;
-                memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
-                len_of_data-=bytesInThisBuffer;
-                pDataAddress+=(unsigned long)bytesInThisBuffer;
-                /*      setup write CCW         */
-                p_this_ccw->write.cmd_code = (linkid * 8) +1;
-                if (len_of_data>0) {
-                        p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
-                }
-                p_this_ccw->write.count=bytesInThisBuffer;
-                /*      now add to end of this chain    */
-                if (p_first_ccw==NULL)    {
-                        p_first_ccw=p_this_ccw;
-                }
-                if (p_last_ccw!=NULL) {
-                        p_last_ccw->next=p_this_ccw;
-                        /*      set up TIC ccws         */
-                        p_last_ccw->w_TIC_1.cda=
-                               (__u32)__pa(&p_this_ccw->write);
-                }
-                p_last_ccw=p_this_ccw;      /* save new last block */
-        }
-
-        /*      FirstCCW and LastCCW now contain a new set of write channel
-        *       programs to append to the running channel program
-        */
-
-        if (p_first_ccw!=NULL) {
-               /*      setup ending ccw sequence for this segment           */
-                pEnd=privptr->p_end_ccw;
-                if (pEnd->write1) {
-                        pEnd->write1=0x00;   /* second end ccw is now active */
-                        /*      set up Tic CCWs         */
-                        p_last_ccw->w_TIC_1.cda=
-                               (__u32)__pa(&pEnd->write2_nop1);
-                        pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
-                        pEnd->write2_nop2.flags    =
-                               CCW_FLAG_SLI | CCW_FLAG_SKIP;
-                        pEnd->write2_nop2.cda=0;
-                        pEnd->write2_nop2.count=1;
-                }
-                else {  /*  end of if (pEnd->write1)*/
-                        pEnd->write1=0x01;   /* first end ccw is now active */
-                        /*      set up Tic CCWs         */
-                        p_last_ccw->w_TIC_1.cda=
-                               (__u32)__pa(&pEnd->write1_nop1);
-                        pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
-                        pEnd->write1_nop2.flags    =
-                               CCW_FLAG_SLI | CCW_FLAG_SKIP;
-                        pEnd->write1_nop2.cda=0;
-                        pEnd->write1_nop2.count=1;
-                }  /* end if if (pEnd->write1) */
-
-                if (privptr->p_write_active_first==NULL ) {
-                        privptr->p_write_active_first=p_first_ccw;
-                        privptr->p_write_active_last=p_last_ccw;
-                }
-                else {
-                        /*      set up Tic CCWs         */
-
-                        tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
-                        tempCCW.count=0;
-                        tempCCW.flags=0;
-                        tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
-
-                        if (pEnd->write1) {
-
-                 /*
-                 * first set of ending CCW's is chained to the new write
-                 * chain, so the second set is chained to the active chain
-                 * Therefore modify the second set to point the new write chain.
-                 * make sure we update the CCW atomically
-                 * so channel does not fetch it when it's only half done
-                 */
-                                memcpy( &pEnd->write2_nop2, &tempCCW ,
-                                       sizeof(struct ccw1));
-                                privptr->p_write_active_last->w_TIC_1.cda=
-                                       (__u32)__pa(&p_first_ccw->write);
-                        }
-                        else {
-
-                        /*make sure we update the CCW atomically
-                         *so channel does not fetch it when it's only half done
-                         */
-                                memcpy(&pEnd->write1_nop2, &tempCCW ,
-                                       sizeof(struct ccw1));
-                                privptr->p_write_active_last->w_TIC_1.cda=
-                                       (__u32)__pa(&p_first_ccw->write);
-
-                        } /* end if if (pEnd->write1) */
-
-                        privptr->p_write_active_last->next=p_first_ccw;
-                        privptr->p_write_active_last=p_last_ccw;
-                }
-
-        } /* endif (p_first_ccw!=NULL)  */
-        dev_kfree_skb_any(skb);
-        claw_strt_out_IO(dev );
-        /*      if write free count is zero , set NOBUFFER       */
-       if (privptr->write_free_count==0) {
-               claw_setbit_busy(TB_NOBUFFER,dev);
-        }
-Done2:
-       claw_clearbit_busy(TB_TX,dev);
-Done:
-       return(rc);
-}    /*    end of claw_hw_tx    */
-
-/*-------------------------------------------------------------------*
-*                                                                    *
-*     init_ccw_bk                                                    *
-*                                                                    *
-*--------------------------------------------------------------------*/
-
-static int
-init_ccw_bk(struct net_device *dev)
-{
-
-        __u32   ccw_blocks_required;
-        __u32   ccw_blocks_perpage;
-        __u32   ccw_pages_required;
-        __u32   claw_reads_perpage=1;
-        __u32   claw_read_pages;
-        __u32   claw_writes_perpage=1;
-        __u32   claw_write_pages;
-        void    *p_buff=NULL;
-        struct ccwbk*p_free_chain;
-       struct ccwbk*p_buf;
-       struct ccwbk*p_last_CCWB;
-       struct ccwbk*p_first_CCWB;
-        struct endccw *p_endccw=NULL;
-       addr_t  real_address;
-       struct claw_privbk *privptr = dev->ml_priv;
-        struct clawh *pClawH=NULL;
-        addr_t   real_TIC_address;
-        int i,j;
-       CLAW_DBF_TEXT(4, trace, "init_ccw");
-
-        /*  initialize  statistics field */
-        privptr->active_link_ID=0;
-        /*  initialize  ccwbk pointers  */
-        privptr->p_write_free_chain=NULL;   /* pointer to free ccw chain*/
-        privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
-        privptr->p_write_active_last=NULL;  /* pointer to the last write ccw*/
-        privptr->p_read_active_first=NULL;  /* pointer to the first read ccw*/
-        privptr->p_read_active_last=NULL;   /* pointer to the last read ccw */
-        privptr->p_end_ccw=NULL;            /* pointer to ending ccw        */
-        privptr->p_claw_signal_blk=NULL;    /* pointer to signal block      */
-       privptr->buffs_alloc = 0;
-        memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
-        memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
-        /*  initialize  free write ccwbk counter  */
-        privptr->write_free_count=0;  /* number of free bufs on write chain */
-        p_last_CCWB = NULL;
-        p_first_CCWB= NULL;
-        /*
-        *  We need 1 CCW block for each read buffer, 1 for each
-        *  write buffer, plus 1 for ClawSignalBlock
-        */
-        ccw_blocks_required =
-               privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
-        /*
-        * compute number of CCW blocks that will fit in a page
-        */
-        ccw_blocks_perpage= PAGE_SIZE /  CCWBK_SIZE;
-        ccw_pages_required=
-               DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
-
-        /*
-         *  read and write sizes are set by 2 constants in claw.h
-        *  4k and 32k.  Unpacked values other than 4k are not going to
-        * provide good performance. With packing buffers support 32k
-        * buffers are used.
-         */
-       if (privptr->p_env->read_size < PAGE_SIZE) {
-               claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
-               claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
-                                               claw_reads_perpage);
-         }
-         else {       /* > or equal  */
-               privptr->p_buff_pages_perread =
-                       DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
-               claw_read_pages = privptr->p_env->read_buffers *
-                                       privptr->p_buff_pages_perread;
-         }
-        if (privptr->p_env->write_size < PAGE_SIZE) {
-               claw_writes_perpage =
-                       PAGE_SIZE / privptr->p_env->write_size;
-               claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
-                                               claw_writes_perpage);
-
-        }
-        else {      /* >  or equal  */
-               privptr->p_buff_pages_perwrite =
-                       DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
-               claw_write_pages = privptr->p_env->write_buffers *
-                                       privptr->p_buff_pages_perwrite;
-        }
-        /*
-        *               allocate ccw_pages_required
-        */
-        if (privptr->p_buff_ccw==NULL) {
-                privptr->p_buff_ccw=
-                       (void *)__get_free_pages(__GFP_DMA,
-                       (int)pages_to_order_of_mag(ccw_pages_required ));
-                if (privptr->p_buff_ccw==NULL) {
-                        return -ENOMEM;
-                }
-                privptr->p_buff_ccw_num=ccw_pages_required;
-        }
-        memset(privptr->p_buff_ccw, 0x00,
-               privptr->p_buff_ccw_num * PAGE_SIZE);
-
-        /*
-        *               obtain ending ccw block address
-        *
-        */
-        privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
-        real_address  = (__u32)__pa(privptr->p_end_ccw);
-        /*                              Initialize ending CCW block       */
-        p_endccw=privptr->p_end_ccw;
-        p_endccw->real=real_address;
-        p_endccw->write1=0x00;
-        p_endccw->read1=0x00;
-
-        /*      write1_nop1                                     */
-        p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
-        p_endccw->write1_nop1.flags       = CCW_FLAG_SLI | CCW_FLAG_CC;
-        p_endccw->write1_nop1.count       = 1;
-        p_endccw->write1_nop1.cda         = 0;
-
-        /*      write1_nop2                                     */
-        p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
-        p_endccw->write1_nop2.flags        = CCW_FLAG_SLI | CCW_FLAG_SKIP;
-        p_endccw->write1_nop2.count      = 1;
-        p_endccw->write1_nop2.cda        = 0;
-
-        /*      write2_nop1                                     */
-        p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
-        p_endccw->write2_nop1.flags        = CCW_FLAG_SLI | CCW_FLAG_CC;
-        p_endccw->write2_nop1.count        = 1;
-        p_endccw->write2_nop1.cda          = 0;
-
-        /*      write2_nop2                                     */
-        p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
-        p_endccw->write2_nop2.flags        = CCW_FLAG_SLI | CCW_FLAG_SKIP;
-        p_endccw->write2_nop2.count        = 1;
-        p_endccw->write2_nop2.cda          = 0;
-
-        /*      read1_nop1                                      */
-        p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
-        p_endccw->read1_nop1.flags        = CCW_FLAG_SLI | CCW_FLAG_CC;
-        p_endccw->read1_nop1.count        = 1;
-        p_endccw->read1_nop1.cda          = 0;
-
-        /*      read1_nop2                                      */
-        p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
-        p_endccw->read1_nop2.flags        = CCW_FLAG_SLI | CCW_FLAG_SKIP;
-        p_endccw->read1_nop2.count        = 1;
-        p_endccw->read1_nop2.cda          = 0;
-
-        /*      read2_nop1                                      */
-        p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
-        p_endccw->read2_nop1.flags        = CCW_FLAG_SLI | CCW_FLAG_CC;
-        p_endccw->read2_nop1.count        = 1;
-        p_endccw->read2_nop1.cda          = 0;
-
-        /*      read2_nop2                                      */
-        p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
-        p_endccw->read2_nop2.flags        = CCW_FLAG_SLI | CCW_FLAG_SKIP;
-        p_endccw->read2_nop2.count        = 1;
-        p_endccw->read2_nop2.cda          = 0;
-
-        /*
-        *                               Build a chain of CCWs
-        *
-        */
-        p_buff=privptr->p_buff_ccw;
-
-        p_free_chain=NULL;
-        for (i=0 ; i < ccw_pages_required; i++ ) {
-                real_address  = (__u32)__pa(p_buff);
-                p_buf=p_buff;
-                for (j=0 ; j < ccw_blocks_perpage ; j++) {
-                        p_buf->next  = p_free_chain;
-                        p_free_chain = p_buf;
-                        p_buf->real=(__u32)__pa(p_buf);
-                        ++p_buf;
-                }
-                p_buff+=PAGE_SIZE;
-        }
-        /*
-        *                               Initialize ClawSignalBlock
-        *
-        */
-        if (privptr->p_claw_signal_blk==NULL) {
-                privptr->p_claw_signal_blk=p_free_chain;
-                p_free_chain=p_free_chain->next;
-                pClawH=(struct clawh *)privptr->p_claw_signal_blk;
-                pClawH->length=0xffff;
-                pClawH->opcode=0xff;
-                pClawH->flag=CLAW_BUSY;
-        }
-
-        /*
-        *               allocate write_pages_required and add to free chain
-        */
-        if (privptr->p_buff_write==NULL) {
-            if (privptr->p_env->write_size < PAGE_SIZE) {
-                privptr->p_buff_write=
-                       (void *)__get_free_pages(__GFP_DMA,
-                       (int)pages_to_order_of_mag(claw_write_pages ));
-                if (privptr->p_buff_write==NULL) {
-                        privptr->p_buff_ccw=NULL;
-                        return -ENOMEM;
-                }
-                /*
-                *                               Build CLAW write free chain
-                *
-                */
-
-                memset(privptr->p_buff_write, 0x00,
-                       ccw_pages_required * PAGE_SIZE);
-                privptr->p_write_free_chain=NULL;
-
-                p_buff=privptr->p_buff_write;
-
-                for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
-                        p_buf        = p_free_chain;      /*  get a CCW */
-                        p_free_chain = p_buf->next;
-                        p_buf->next  =privptr->p_write_free_chain;
-                        privptr->p_write_free_chain = p_buf;
-                        p_buf-> p_buffer       = (struct clawbuf *)p_buff;
-                        p_buf-> write.cda       = (__u32)__pa(p_buff);
-                        p_buf-> write.flags     = CCW_FLAG_SLI | CCW_FLAG_CC;
-                        p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
-                        p_buf-> w_read_FF.flags   = CCW_FLAG_SLI | CCW_FLAG_CC;
-                        p_buf-> w_read_FF.count   = 1;
-                        p_buf-> w_read_FF.cda     =
-                               (__u32)__pa(&p_buf-> header.flag);
-                        p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
-                        p_buf-> w_TIC_1.flags      = 0;
-                        p_buf-> w_TIC_1.count      = 0;
-
-                       if (((unsigned long)p_buff +
-                                           privptr->p_env->write_size) >=
-                          ((unsigned long)(p_buff+2*
-                           (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
-                               p_buff = p_buff+privptr->p_env->write_size;
-                        }
-                }
-           }
-           else      /*  Buffers are => PAGE_SIZE. 1 buff per get_free_pages */
-           {
-               privptr->p_write_free_chain=NULL;
-               for (i = 0; i< privptr->p_env->write_buffers ; i++) {
-                   p_buff=(void *)__get_free_pages(__GFP_DMA,
-                       (int)pages_to_order_of_mag(
-                       privptr->p_buff_pages_perwrite) );
-                   if (p_buff==NULL) {
-                        free_pages((unsigned long)privptr->p_buff_ccw,
-                             (int)pages_to_order_of_mag(
-                                       privptr->p_buff_ccw_num));
-                        privptr->p_buff_ccw=NULL;
-                       p_buf=privptr->p_buff_write;
-                        while (p_buf!=NULL) {
-                                free_pages((unsigned long)
-                                       p_buf->p_buffer,
-                                       (int)pages_to_order_of_mag(
-                                       privptr->p_buff_pages_perwrite));
-                                p_buf=p_buf->next;
-                        }
-                        return -ENOMEM;
-                   }  /* Error on get_pages   */
-                   memset(p_buff, 0x00, privptr->p_env->write_size );
-                   p_buf         = p_free_chain;
-                   p_free_chain  = p_buf->next;
-                   p_buf->next   = privptr->p_write_free_chain;
-                   privptr->p_write_free_chain = p_buf;
-                   privptr->p_buff_write = p_buf;
-                   p_buf->p_buffer=(struct clawbuf *)p_buff;
-                   p_buf-> write.cda     = (__u32)__pa(p_buff);
-                   p_buf-> write.flags   = CCW_FLAG_SLI | CCW_FLAG_CC;
-                   p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
-                   p_buf-> w_read_FF.flags    = CCW_FLAG_SLI | CCW_FLAG_CC;
-                   p_buf-> w_read_FF.count    = 1;
-                   p_buf-> w_read_FF.cda      =
-                       (__u32)__pa(&p_buf-> header.flag);
-                   p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
-                   p_buf-> w_TIC_1.flags   = 0;
-                   p_buf-> w_TIC_1.count   = 0;
-               }  /* for all write_buffers   */
-
-           }    /* else buffers are PAGE_SIZE or bigger */
-
-        }
-        privptr->p_buff_write_num=claw_write_pages;
-        privptr->write_free_count=privptr->p_env->write_buffers;
-
-
-        /*
-        *               allocate read_pages_required and chain to free chain
-        */
-        if (privptr->p_buff_read==NULL) {
-            if (privptr->p_env->read_size < PAGE_SIZE)  {
-                privptr->p_buff_read=
-                       (void *)__get_free_pages(__GFP_DMA,
-                       (int)pages_to_order_of_mag(claw_read_pages) );
-                if (privptr->p_buff_read==NULL) {
-                        free_pages((unsigned long)privptr->p_buff_ccw,
-                               (int)pages_to_order_of_mag(
-                                       privptr->p_buff_ccw_num));
-                       /* free the write pages size is < page size  */
-                        free_pages((unsigned long)privptr->p_buff_write,
-                               (int)pages_to_order_of_mag(
-                               privptr->p_buff_write_num));
-                        privptr->p_buff_ccw=NULL;
-                        privptr->p_buff_write=NULL;
-                        return -ENOMEM;
-                }
-                memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
-                privptr->p_buff_read_num=claw_read_pages;
-                /*
-                *                               Build CLAW read free chain
-                *
-                */
-                p_buff=privptr->p_buff_read;
-                for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
-                        p_buf        = p_free_chain;
-                        p_free_chain = p_buf->next;
-
-                        if (p_last_CCWB==NULL) {
-                                p_buf->next=NULL;
-                                real_TIC_address=0;
-                                p_last_CCWB=p_buf;
-                        }
-                        else {
-                                p_buf->next=p_first_CCWB;
-                                real_TIC_address=
-                               (__u32)__pa(&p_first_CCWB -> read );
-                        }
-
-                        p_first_CCWB=p_buf;
-
-                        p_buf->p_buffer=(struct clawbuf *)p_buff;
-                        /*  initialize read command */
-                        p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
-                        p_buf-> read.cda = (__u32)__pa(p_buff);
-                        p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
-                        p_buf-> read.count       = privptr->p_env->read_size;
-
-                        /*  initialize read_h command */
-                        p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
-                        p_buf-> read_h.cda =
-                               (__u32)__pa(&(p_buf->header));
-                        p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
-                        p_buf-> read_h.count      = sizeof(struct clawh);
-
-                        /*  initialize Signal command */
-                        p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
-                        p_buf-> signal.cda =
-                               (__u32)__pa(&(pClawH->flag));
-                        p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
-                        p_buf-> signal.count     = 1;
-
-                        /*  initialize r_TIC_1 command */
-                        p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
-                        p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
-                        p_buf-> r_TIC_1.flags = 0;
-                        p_buf-> r_TIC_1.count      = 0;
-
-                        /*  initialize r_read_FF command */
-                        p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
-                        p_buf-> r_read_FF.cda =
-                               (__u32)__pa(&(pClawH->flag));
-                        p_buf-> r_read_FF.flags =
-                               CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
-                        p_buf-> r_read_FF.count    = 1;
-
-                        /*    initialize r_TIC_2          */
-                        memcpy(&p_buf->r_TIC_2,
-                               &p_buf->r_TIC_1, sizeof(struct ccw1));
-
-                        /*     initialize Header     */
-                        p_buf->header.length=0xffff;
-                        p_buf->header.opcode=0xff;
-                        p_buf->header.flag=CLAW_PENDING;
-
-                       if (((unsigned long)p_buff+privptr->p_env->read_size) >=
-                         ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
-                                -1)
-                          & PAGE_MASK)) {
-                                p_buff= p_buff+privptr->p_env->read_size;
-                        }
-                        else {
-                                p_buff=
-                               (void *)((unsigned long)
-                                       (p_buff+2*(privptr->p_env->read_size)-1)
-                                        & PAGE_MASK) ;
-                        }
-                }   /* for read_buffers   */
-          }         /* read_size < PAGE_SIZE  */
-          else {  /* read Size >= PAGE_SIZE  */
-                for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
-                        p_buff = (void *)__get_free_pages(__GFP_DMA,
-                               (int)pages_to_order_of_mag(
-                                       privptr->p_buff_pages_perread));
-                        if (p_buff==NULL) {
-                                free_pages((unsigned long)privptr->p_buff_ccw,
-                                       (int)pages_to_order_of_mag(privptr->
-                                       p_buff_ccw_num));
-                               /* free the write pages  */
-                               p_buf=privptr->p_buff_write;
-                                while (p_buf!=NULL) {
-                                       free_pages(
-                                           (unsigned long)p_buf->p_buffer,
-                                           (int)pages_to_order_of_mag(
-                                           privptr->p_buff_pages_perwrite));
-                                        p_buf=p_buf->next;
-                                }
-                               /* free any read pages already alloc  */
-                               p_buf=privptr->p_buff_read;
-                                while (p_buf!=NULL) {
-                                       free_pages(
-                                           (unsigned long)p_buf->p_buffer,
-                                           (int)pages_to_order_of_mag(
-                                            privptr->p_buff_pages_perread));
-                                        p_buf=p_buf->next;
-                                }
-                                privptr->p_buff_ccw=NULL;
-                                privptr->p_buff_write=NULL;
-                                return -ENOMEM;
-                        }
-                        memset(p_buff, 0x00, privptr->p_env->read_size);
-                        p_buf        = p_free_chain;
-                        privptr->p_buff_read = p_buf;
-                        p_free_chain = p_buf->next;
-
-                        if (p_last_CCWB==NULL) {
-                                p_buf->next=NULL;
-                                real_TIC_address=0;
-                                p_last_CCWB=p_buf;
-                        }
-                        else {
-                                p_buf->next=p_first_CCWB;
-                                real_TIC_address=
-                                       (addr_t)__pa(
-                                               &p_first_CCWB -> read );
-                        }
-
-                        p_first_CCWB=p_buf;
-                               /* save buff address */
-                        p_buf->p_buffer=(struct clawbuf *)p_buff;
-                        /*  initialize read command */
-                        p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
-                        p_buf-> read.cda = (__u32)__pa(p_buff);
-                        p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
-                        p_buf-> read.count       = privptr->p_env->read_size;
-
-                        /*  initialize read_h command */
-                        p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
-                        p_buf-> read_h.cda =
-                               (__u32)__pa(&(p_buf->header));
-                        p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
-                        p_buf-> read_h.count      = sizeof(struct clawh);
-
-                        /*  initialize Signal command */
-                        p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
-                        p_buf-> signal.cda =
-                               (__u32)__pa(&(pClawH->flag));
-                        p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
-                        p_buf-> signal.count     = 1;
-
-                        /*  initialize r_TIC_1 command */
-                        p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
-                        p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
-                        p_buf-> r_TIC_1.flags = 0;
-                        p_buf-> r_TIC_1.count      = 0;
-
-                        /*  initialize r_read_FF command */
-                        p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
-                        p_buf-> r_read_FF.cda =
-                               (__u32)__pa(&(pClawH->flag));
-                        p_buf-> r_read_FF.flags =
-                               CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
-                        p_buf-> r_read_FF.count    = 1;
-
-                        /*    initialize r_TIC_2          */
-                        memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
-                               sizeof(struct ccw1));
-
-                        /*     initialize Header     */
-                        p_buf->header.length=0xffff;
-                        p_buf->header.opcode=0xff;
-                        p_buf->header.flag=CLAW_PENDING;
-
-                }    /* For read_buffers   */
-          }     /*  read_size >= PAGE_SIZE   */
-        }       /*  pBuffread = NULL */
-        add_claw_reads( dev  ,p_first_CCWB , p_last_CCWB);
-       privptr->buffs_alloc = 1;
-
-        return 0;
-}    /*    end of init_ccw_bk */
-
-/*-------------------------------------------------------------------*
-*                                                                    *
-*       probe_error                                                  *
-*                                                                    *
-*--------------------------------------------------------------------*/
-
-static void
-probe_error( struct ccwgroup_device *cgdev)
-{
-       struct claw_privbk *privptr;
-
-       CLAW_DBF_TEXT(4, trace, "proberr");
-       privptr = dev_get_drvdata(&cgdev->dev);
-       if (privptr != NULL) {
-               dev_set_drvdata(&cgdev->dev, NULL);
-               kfree(privptr->p_env);
-               kfree(privptr->p_mtc_envelope);
-               kfree(privptr);
-       }
-}    /*    probe_error    */
-
-/*-------------------------------------------------------------------*
-*    claw_process_control                                            *
-*                                                                    *
-*                                                                    *
-*--------------------------------------------------------------------*/
-
-static int
-claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
-{
-
-        struct clawbuf *p_buf;
-        struct clawctl  ctlbk;
-        struct clawctl *p_ctlbk;
-        char    temp_host_name[8];
-        char    temp_ws_name[8];
-        struct claw_privbk *privptr;
-        struct claw_env *p_env;
-        struct sysval *p_sysval;
-        struct conncmd *p_connect=NULL;
-        int rc;
-        struct chbk *p_ch = NULL;
-       struct device *tdev;
-       CLAW_DBF_TEXT(2, setup, "clw_cntl");
-        udelay(1000);  /* Wait a ms for the control packets to
-                       *catch up to each other */
-       privptr = dev->ml_priv;
-        p_env=privptr->p_env;
-       tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
-       memcpy( &temp_host_name, p_env->host_name, 8);
-        memcpy( &temp_ws_name, p_env->adapter_name , 8);
-       dev_info(tdev, "%s: CLAW device %.8s: "
-               "Received Control Packet\n",
-               dev->name, temp_ws_name);
-        if (privptr->release_pend==1) {
-                return 0;
-        }
-        p_buf=p_ccw->p_buffer;
-        p_ctlbk=&ctlbk;
-       if (p_env->packing == DO_PACKED) { /* packing in progress?*/
-               memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
-       } else {
-               memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
-       }
-        switch (p_ctlbk->command)
-        {
-       case SYSTEM_VALIDATE_REQUEST:
-               if (p_ctlbk->version != CLAW_VERSION_ID) {
-                       claw_snd_sys_validate_rsp(dev, p_ctlbk,
-                               CLAW_RC_WRONG_VERSION);
-                       dev_warn(tdev, "The communication peer of %s"
-                               " uses an incorrect API version %d\n",
-                               dev->name, p_ctlbk->version);
-               }
-               p_sysval = (struct sysval *)&(p_ctlbk->data);
-               dev_info(tdev, "%s: Recv Sys Validate Request: "
-                       "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
-                       "Host name=%.8s\n",
-                       dev->name, p_ctlbk->version,
-                       p_ctlbk->linkid,
-                       p_ctlbk->correlator,
-                       p_sysval->WS_name,
-                       p_sysval->host_name);
-               if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
-                       claw_snd_sys_validate_rsp(dev, p_ctlbk,
-                               CLAW_RC_NAME_MISMATCH);
-                       CLAW_DBF_TEXT(2, setup, "HSTBAD");
-                       CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
-                       CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
-                       dev_warn(tdev,
-                               "Host name %s for %s does not match the"
-                               " remote adapter name %s\n",
-                               p_sysval->host_name,
-                               dev->name,
-                               temp_host_name);
-               }
-               if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
-                       claw_snd_sys_validate_rsp(dev, p_ctlbk,
-                               CLAW_RC_NAME_MISMATCH);
-                       CLAW_DBF_TEXT(2, setup, "WSNBAD");
-                       CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
-                       CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
-                       dev_warn(tdev, "Adapter name %s for %s does not match"
-                               " the remote host name %s\n",
-                               p_sysval->WS_name,
-                               dev->name,
-                               temp_ws_name);
-               }
-               if ((p_sysval->write_frame_size < p_env->write_size) &&
-                   (p_env->packing == 0)) {
-                       claw_snd_sys_validate_rsp(dev, p_ctlbk,
-                               CLAW_RC_HOST_RCV_TOO_SMALL);
-                       dev_warn(tdev,
-                               "The local write buffer is smaller than the"
-                               " remote read buffer\n");
-                       CLAW_DBF_TEXT(2, setup, "wrtszbad");
-               }
-               if ((p_sysval->read_frame_size < p_env->read_size) &&
-                   (p_env->packing == 0)) {
-                       claw_snd_sys_validate_rsp(dev, p_ctlbk,
-                               CLAW_RC_HOST_RCV_TOO_SMALL);
-                       dev_warn(tdev,
-                               "The local read buffer is smaller than the"
-                               " remote write buffer\n");
-                       CLAW_DBF_TEXT(2, setup, "rdsizbad");
-               }
-               claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
-               dev_info(tdev,
-                       "CLAW device %.8s: System validate"
-                       " completed.\n", temp_ws_name);
-               dev_info(tdev,
-                       "%s: sys Validate Rsize:%d Wsize:%d\n",
-                       dev->name, p_sysval->read_frame_size,
-                       p_sysval->write_frame_size);
-               privptr->system_validate_comp = 1;
-               if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
-                       p_env->packing = PACKING_ASK;
-               claw_strt_conn_req(dev);
-               break;
-       case SYSTEM_VALIDATE_RESPONSE:
-               p_sysval = (struct sysval *)&(p_ctlbk->data);
-               dev_info(tdev,
-                       "Settings for %s validated (version=%d, "
-                       "remote device=%d, rc=%d, adapter name=%.8s, "
-                       "host name=%.8s)\n",
-                       dev->name,
-                       p_ctlbk->version,
-                       p_ctlbk->correlator,
-                       p_ctlbk->rc,
-                       p_sysval->WS_name,
-                       p_sysval->host_name);
-               switch (p_ctlbk->rc) {
-               case 0:
-                       dev_info(tdev, "%s: CLAW device "
-                               "%.8s: System validate completed.\n",
-                               dev->name, temp_ws_name);
-                       if (privptr->system_validate_comp == 0)
-                               claw_strt_conn_req(dev);
-                       privptr->system_validate_comp = 1;
-                       break;
-               case CLAW_RC_NAME_MISMATCH:
-                       dev_warn(tdev, "Validating %s failed because of"
-                               " a host or adapter name mismatch\n",
-                               dev->name);
-                       break;
-               case CLAW_RC_WRONG_VERSION:
-                       dev_warn(tdev, "Validating %s failed because of a"
-                               " version conflict\n",
-                               dev->name);
-                       break;
-               case CLAW_RC_HOST_RCV_TOO_SMALL:
-                       dev_warn(tdev, "Validating %s failed because of a"
-                               " frame size conflict\n",
-                               dev->name);
-                       break;
-               default:
-                       dev_warn(tdev, "The communication peer of %s rejected"
-                               " the connection\n",
-                                dev->name);
-                       break;
-               }
-               break;
-
-       case CONNECTION_REQUEST:
-               p_connect = (struct conncmd *)&(p_ctlbk->data);
-               dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
-                       "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
-                       dev->name,
-                       p_ctlbk->version,
-                       p_ctlbk->linkid,
-                       p_ctlbk->correlator,
-                       p_connect->host_name,
-                       p_connect->WS_name);
-               if (privptr->active_link_ID != 0) {
-                       claw_snd_disc(dev, p_ctlbk);
-                       dev_info(tdev, "%s rejected a connection request"
-                               " because it is already active\n",
-                               dev->name);
-               }
-               if (p_ctlbk->linkid != 1) {
-                       claw_snd_disc(dev, p_ctlbk);
-                       dev_info(tdev, "%s rejected a request to open multiple"
-                               " connections\n",
-                               dev->name);
-               }
-               rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
-               if (rc != 0) {
-                       claw_snd_disc(dev, p_ctlbk);
-                       dev_info(tdev, "%s rejected a connection request"
-                               " because of a type mismatch\n",
-                               dev->name);
-               }
-               claw_send_control(dev,
-                       CONNECTION_CONFIRM, p_ctlbk->linkid,
-                       p_ctlbk->correlator,
-                       0, p_connect->host_name,
-                       p_connect->WS_name);
-               if (p_env->packing == PACKING_ASK) {
-                       p_env->packing = PACK_SEND;
-                       claw_snd_conn_req(dev, 0);
-               }
-               dev_info(tdev, "%s: CLAW device %.8s: Connection "
-                       "completed link_id=%d.\n",
-                       dev->name, temp_ws_name,
-                       p_ctlbk->linkid);
-                       privptr->active_link_ID = p_ctlbk->linkid;
-                       p_ch = &privptr->channel[WRITE_CHANNEL];
-                       wake_up(&p_ch->wait);  /* wake up claw_open ( WRITE) */
-               break;
-       case CONNECTION_RESPONSE:
-               p_connect = (struct conncmd *)&(p_ctlbk->data);
-               dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
-                       "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
-                       dev->name,
-                       p_ctlbk->version,
-                       p_ctlbk->linkid,
-                       p_ctlbk->correlator,
-                       p_ctlbk->rc,
-                       p_connect->host_name,
-                       p_connect->WS_name);
-
-               if (p_ctlbk->rc != 0) {
-                       dev_warn(tdev, "The communication peer of %s rejected"
-                               " a connection request\n",
-                               dev->name);
-                       return 1;
-               }
-               rc = find_link(dev,
-                       p_connect->host_name, p_connect->WS_name);
-               if (rc != 0) {
-                       claw_snd_disc(dev, p_ctlbk);
-                       dev_warn(tdev, "The communication peer of %s"
-                               " rejected a connection "
-                               "request because of a type mismatch\n",
-                                dev->name);
-               }
-               /* should be until CONNECTION_CONFIRM */
-               privptr->active_link_ID = -(p_ctlbk->linkid);
-               break;
-       case CONNECTION_CONFIRM:
-               p_connect = (struct conncmd *)&(p_ctlbk->data);
-               dev_info(tdev,
-                       "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
-                       "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
-                       dev->name,
-                       p_ctlbk->version,
-                       p_ctlbk->linkid,
-                       p_ctlbk->correlator,
-                       p_connect->host_name,
-                       p_connect->WS_name);
-               if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
-                       privptr->active_link_ID = p_ctlbk->linkid;
-                       if (p_env->packing > PACKING_ASK) {
-                               dev_info(tdev,
-                               "%s: Confirmed Now packing\n", dev->name);
-                               p_env->packing = DO_PACKED;
-                       }
-                       p_ch = &privptr->channel[WRITE_CHANNEL];
-                       wake_up(&p_ch->wait);
-               } else {
-                       dev_warn(tdev, "Activating %s failed because of"
-                               " an incorrect link ID=%d\n",
-                               dev->name, p_ctlbk->linkid);
-                       claw_snd_disc(dev, p_ctlbk);
-               }
-               break;
-       case DISCONNECT:
-               dev_info(tdev, "%s: Disconnect: "
-                       "Vers=%d,link_id=%d,Corr=%d\n",
-                       dev->name, p_ctlbk->version,
-                       p_ctlbk->linkid, p_ctlbk->correlator);
-               if ((p_ctlbk->linkid == 2) &&
-                   (p_env->packing == PACK_SEND)) {
-                       privptr->active_link_ID = 1;
-                       p_env->packing = DO_PACKED;
-               } else
-                       privptr->active_link_ID = 0;
-               break;
-       case CLAW_ERROR:
-               dev_warn(tdev, "The communication peer of %s failed\n",
-                       dev->name);
-               break;
-       default:
-               dev_warn(tdev, "The communication peer of %s sent"
-                       " an unknown command code\n",
-                       dev->name);
-               break;
-        }
-
-        return 0;
-}   /*    end of claw_process_control    */
-
-
-/*-------------------------------------------------------------------*
-*               claw_send_control                                    *
-*                                                                    *
-*--------------------------------------------------------------------*/
-
/*
 * claw_send_control() - build and transmit a CLAW control record.
 * @dev:         net device the record is sent on
 * @type:        control command code (SYSTEM_VALIDATE_*, CONNECTION_*, ...)
 * @link:        link id placed in the record
 * @correlator:  correlator echoed to the peer
 * @rc:          return-code field of the record
 * @local_name:  8-byte host name copied into the record payload
 * @remote_name: 8-byte workstation name copied into the record payload
 *
 * Fills the per-device control block, copies it into a freshly
 * allocated skb and hands it to claw_hw_tx().  Returns 0 on success
 * or -ENOMEM if the skb allocation fails.
 */
static int
claw_send_control(struct net_device *dev, __u8 type, __u8 link,
        __u8 correlator, __u8 rc, char *local_name, char *remote_name)
{
        struct claw_privbk             *privptr;
        struct clawctl                  *p_ctl;
        struct sysval                   *p_sysval;
        struct conncmd                  *p_connect;
        struct sk_buff                         *skb;

       CLAW_DBF_TEXT(2, setup, "sndcntl");
       privptr = dev->ml_priv;
        p_ctl=(struct clawctl *)&privptr->ctl_bk;

        p_ctl->command=type;
        p_ctl->version=CLAW_VERSION_ID;
        p_ctl->linkid=link;
        p_ctl->correlator=correlator;
        p_ctl->rc=rc;

        /* Both views alias the same payload area of the control record;
         * which one is meaningful depends on the command below. */
        p_sysval=(struct sysval *)&p_ctl->data;
        p_connect=(struct conncmd *)&p_ctl->data;

        switch (p_ctl->command) {
                case SYSTEM_VALIDATE_REQUEST:
                case SYSTEM_VALIDATE_RESPONSE:
                        memcpy(&p_sysval->host_name, local_name, 8);
                        memcpy(&p_sysval->WS_name, remote_name, 8);
                       if (privptr->p_env->packing > 0) {
                               /* packed mode advertises fixed pack buffers */
                               p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
                               p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
                       } else {
                               /* how big is the biggest group of packets */
                          p_sysval->read_frame_size =
                               privptr->p_env->read_size;
                          p_sysval->write_frame_size =
                               privptr->p_env->write_size;
                       }
                        memset(&p_sysval->reserved, 0x00, 4);
                        break;
                case CONNECTION_REQUEST:
                case CONNECTION_RESPONSE:
                case CONNECTION_CONFIRM:
                case DISCONNECT:
                        /* NOTE(review): the names are copied through the
                         * sysval view even for connection commands;
                         * presumably host_name/WS_name sit at the same
                         * offsets in struct conncmd -- confirm against
                         * the struct definitions. */
                        memcpy(&p_sysval->host_name, local_name, 8);
                        memcpy(&p_sysval->WS_name, remote_name, 8);
                       if (privptr->p_env->packing > 0) {
                       /* How big is the biggest packet */
                               p_connect->reserved1[0]=CLAW_FRAME_SIZE;
                               p_connect->reserved1[1]=CLAW_FRAME_SIZE;
                       } else {
                               memset(&p_connect->reserved1, 0x00, 4);
                               memset(&p_connect->reserved2, 0x00, 4);
                       }
                        break;
                default:
                        break;
        }

        /*      write Control Record to the device                   */


        skb = dev_alloc_skb(sizeof(struct clawctl));
        if (!skb) {
                return -ENOMEM;
        }
       memcpy(skb_put(skb, sizeof(struct clawctl)),
               p_ctl, sizeof(struct clawctl));
       /* third argument tells claw_hw_tx whether to frame for packing */
       if (privptr->p_env->packing >= PACK_SEND)
               claw_hw_tx(skb, dev, 1);
       else
               claw_hw_tx(skb, dev, 0);
        return 0;
}  /*   end of claw_send_control  */
-
-/*-------------------------------------------------------------------*
-*               claw_snd_conn_req                                    *
-*                                                                    *
-*--------------------------------------------------------------------*/
-static int
-claw_snd_conn_req(struct net_device *dev, __u8 link)
-{
-        int                rc;
-       struct claw_privbk *privptr = dev->ml_priv;
-        struct clawctl            *p_ctl;
-
-       CLAW_DBF_TEXT(2, setup, "snd_conn");
-       rc = 1;
-        p_ctl=(struct clawctl *)&privptr->ctl_bk;
-       p_ctl->linkid = link;
-        if ( privptr->system_validate_comp==0x00 ) {
-                return rc;
-        }
-       if (privptr->p_env->packing == PACKING_ASK )
-               rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
-                       WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
-       if (privptr->p_env->packing == PACK_SEND)  {
-               rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
-                       WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
-       }
-       if (privptr->p_env->packing == 0)
-               rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
-                               HOST_APPL_NAME, privptr->p_env->api_type);
-        return rc;
-
-}  /*  end of claw_snd_conn_req */
-
-
-/*-------------------------------------------------------------------*
-*               claw_snd_disc                                        *
-*                                                                    *
-*--------------------------------------------------------------------*/
-
-static int
-claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
-{
-        int rc;
-        struct conncmd *  p_connect;
-
-       CLAW_DBF_TEXT(2, setup, "snd_dsc");
-        p_connect=(struct conncmd *)&p_ctl->data;
-
-        rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
-               p_ctl->correlator, 0,
-                p_connect->host_name, p_connect->WS_name);
-        return rc;
-}     /*   end of claw_snd_disc    */
-
-
-/*-------------------------------------------------------------------*
-*               claw_snd_sys_validate_rsp                            *
-*                                                                    *
-*--------------------------------------------------------------------*/
-
-static int
-claw_snd_sys_validate_rsp(struct net_device *dev,
-       struct clawctl *p_ctl, __u32 return_code)
-{
-        struct claw_env *  p_env;
-        struct claw_privbk *privptr;
-        int    rc;
-
-       CLAW_DBF_TEXT(2, setup, "chkresp");
-       privptr = dev->ml_priv;
-        p_env=privptr->p_env;
-        rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
-               p_ctl->linkid,
-               p_ctl->correlator,
-                return_code,
-               p_env->host_name,
-               p_env->adapter_name  );
-        return rc;
-}     /*    end of claw_snd_sys_validate_rsp    */
-
-/*-------------------------------------------------------------------*
-*               claw_strt_conn_req                                   *
-*                                                                    *
-*--------------------------------------------------------------------*/
-
-static int
-claw_strt_conn_req(struct net_device *dev )
-{
-        int rc;
-
-       CLAW_DBF_TEXT(2, setup, "conn_req");
-        rc=claw_snd_conn_req(dev, 1);
-        return rc;
-}    /*   end of claw_strt_conn_req   */
-
-
-
-/*-------------------------------------------------------------------*
- *   claw_stats                                                      *
- *-------------------------------------------------------------------*/
-
-static struct
-net_device_stats *claw_stats(struct net_device *dev)
-{
-        struct claw_privbk *privptr;
-
-       CLAW_DBF_TEXT(4, trace, "stats");
-       privptr = dev->ml_priv;
-        return &privptr->stats;
-}     /*   end of claw_stats   */
-
-
-/*-------------------------------------------------------------------*
-*       unpack_read                                                  *
-*                                                                    *
-*--------------------------------------------------------------------*/
/*
 * unpack_read() - process all completed read CCWs on a device.
 * @dev: net device whose read queue is drained
 *
 * Walks the active read queue, unpacking each completed frame: control
 * records (link 0) go to claw_process_control(), data frames are
 * assembled into the multi-frame envelope and delivered via netif_rx()
 * once the last frame (no More-To-Come flag) arrives.  Packed frames
 * may contain several sub-packets, each preceded by a clawph header.
 * Processed CCW blocks are collected and returned to the free pool via
 * add_claw_reads(), then the read channel is restarted.
 */
static void
unpack_read(struct net_device *dev )
{
        struct sk_buff *skb;
        struct claw_privbk *privptr;
       struct claw_env    *p_env;
        struct ccwbk   *p_this_ccw;
        struct ccwbk   *p_first_ccw;
        struct ccwbk   *p_last_ccw;
       struct clawph   *p_packh;
       void            *p_packd;
       struct clawctl  *p_ctlrec=NULL;
       struct device   *p_dev;

        __u32  len_of_data;
       __u32   pack_off;       /* offset of next sub-packet in a packed frame */
        __u8   link_num;
        __u8   mtc_this_frm=0; /* More-To-Come flag of the current frame */
        __u32  bytes_to_mov;
        int    i=0;            /* frames processed (debug counter) */
       int     p=0;            /* sub-packets unpacked (debug counter) */

       CLAW_DBF_TEXT(4, trace, "unpkread");
        p_first_ccw=NULL;
        p_last_ccw=NULL;
       p_packh=NULL;
       p_packd=NULL;
       privptr = dev->ml_priv;

       p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
       p_env = privptr->p_env;
        p_this_ccw=privptr->p_read_active_first;
       while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
               pack_off = 0;
               p = 0;
               /* Mark this CCW consumed and unlink it from the queue. */
               p_this_ccw->header.flag=CLAW_PENDING;
               privptr->p_read_active_first=p_this_ccw->next;
                p_this_ccw->next=NULL;
               p_packh = (struct clawph *)p_this_ccw->p_buffer;
               if ((p_env->packing == PACK_SEND) &&
                   (p_packh->len == 32)           &&
                   (p_packh->link_num == 0)) {   /* is it a packed ctl rec? */
                       p_packh++;  /* peek past pack header */
                       p_ctlrec = (struct clawctl *)p_packh;
                       p_packh--;  /* un peek */
                       if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
                           (p_ctlrec->command == CONNECTION_CONFIRM))
                               p_env->packing = DO_PACKED;
               }
               /* Link number comes from the pack header in packed mode,
                * otherwise it is encoded in the CCW opcode. */
               if (p_env->packing == DO_PACKED)
                       link_num=p_packh->link_num;
               else
                       link_num=p_this_ccw->header.opcode / 8;
                if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
                        mtc_this_frm=1;
                        if (p_this_ccw->header.length!=
                               privptr->p_env->read_size ) {
                               dev_warn(p_dev,
                                       "The communication peer of %s"
                                       " sent a faulty"
                                       " frame of length %02x\n",
                                        dev->name, p_this_ccw->header.length);
                        }
                }

                if (privptr->mtc_skipping) {
                        /*
                        *   We're in the mode of skipping past a
                       *   multi-frame message
                        *   that we can't process for some reason or other.
                        *   The first frame without the More-To-Come flag is
                       *   the last frame of the skipped message.
                        */
                        /*  in case of More-To-Come not set in this frame */
                        if (mtc_this_frm==0) {
                                privptr->mtc_skipping=0; /* Ok, the end */
                                privptr->mtc_logical_link=-1;
                        }
                        goto NextFrame;
                }

                /* Link 0 carries control records, not user data. */
                if (link_num==0) {
                        claw_process_control(dev, p_this_ccw);
                       CLAW_DBF_TEXT(4, trace, "UnpkCntl");
                        goto NextFrame;
                }
unpack_next:
               if (p_env->packing == DO_PACKED) {
                       if (pack_off > p_env->read_size)
                               goto NextFrame;
                       p_packd = p_this_ccw->p_buffer+pack_off;
                       p_packh = (struct clawph *) p_packd;
                       if ((p_packh->len == 0) || /* done with this frame? */
                           (p_packh->flag != 0))
                               goto NextFrame;
                       bytes_to_mov = p_packh->len;
                       pack_off += bytes_to_mov+sizeof(struct clawph);
                       p++;
               } else {
                       bytes_to_mov=p_this_ccw->header.length;
               }
                if (privptr->mtc_logical_link<0) {

                /*
                *  if More-To-Come is set in this frame then we don't know
                *  length of entire message, and hence have to allocate
               *  large buffer   */

                /*      We are starting a new envelope  */
                privptr->mtc_offset=0;
                        privptr->mtc_logical_link=link_num;
                }

                if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
                        /* message would overflow the envelope: count the
                         * error and drop the frame */
                        privptr->stats.rx_frame_errors++;
                        goto NextFrame;
                }
               if (p_env->packing == DO_PACKED) {
                       memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
                               p_packd+sizeof(struct clawph), bytes_to_mov);

               } else  {
                       memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
                               p_this_ccw->p_buffer, bytes_to_mov);
               }
                if (mtc_this_frm==0) {
                        /* Last frame of the message: hand the assembled
                         * envelope to the network stack. */
                        len_of_data=privptr->mtc_offset+bytes_to_mov;
                        skb=dev_alloc_skb(len_of_data);
                        if (skb) {
                                memcpy(skb_put(skb,len_of_data),
                                       privptr->p_mtc_envelope,
                                       len_of_data);
                                skb->dev=dev;
                               skb_reset_mac_header(skb);
                                skb->protocol=htons(ETH_P_IP);
                                skb->ip_summed=CHECKSUM_UNNECESSARY;
                                privptr->stats.rx_packets++;
                               privptr->stats.rx_bytes+=len_of_data;
                                netif_rx(skb);
                        }
                        else {
                               dev_info(p_dev, "Allocating a buffer for"
                                       " incoming data failed\n");
                                privptr->stats.rx_dropped++;
                        }
                        privptr->mtc_offset=0;
                        privptr->mtc_logical_link=-1;
                }
                else {
                        privptr->mtc_offset+=bytes_to_mov;
                }
               /* A packed frame may hold more sub-packets. */
               if (p_env->packing == DO_PACKED)
                       goto unpack_next;
NextFrame:
                /*
                *   Remove ThisCCWblock from active read queue, and add it
                *   to queue of free blocks to be reused.
                */
                i++;
                p_this_ccw->header.length=0xffff;
                p_this_ccw->header.opcode=0xff;
                /*
                *       add this one to the free queue for later reuse
                */
                if (p_first_ccw==NULL) {
                        p_first_ccw = p_this_ccw;
                }
                else {
                        p_last_ccw->next = p_this_ccw;
                }
                p_last_ccw = p_this_ccw;
                /*
                *       chain to next block on active read queue
                */
                p_this_ccw = privptr->p_read_active_first;
               CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
        } /* end of while */

        /*      check validity                  */

       CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
        add_claw_reads(dev, p_first_ccw, p_last_ccw);
        claw_strt_read(dev, LOCK_YES);
        return;
}     /*  end of unpack_read   */
-
-/*-------------------------------------------------------------------*
-*       claw_strt_read                                               *
-*                                                                    *
-*--------------------------------------------------------------------*/
/*
 * claw_strt_read() - kick off a read channel program if none is active.
 * @dev:  net device whose READ_CHANNEL is started
 * @lock: LOCK_YES to take the ccw device lock here; otherwise the
 *        caller is expected to already hold it
 *
 * Publishes the adapter busy/idle state in the claw signal block, then
 * starts ccw I/O on the first active read CCW unless a read is already
 * in flight (guarded by the IO_active test-and-set latch).
 */
static void
claw_strt_read (struct net_device *dev, int lock )
{
        int        rc = 0;
        __u32      parm;
        unsigned long  saveflags = 0;
       struct claw_privbk *privptr = dev->ml_priv;
        struct ccwbk*p_ccwbk;
        struct chbk *p_ch;
        struct clawh *p_clawh;
       p_ch = &privptr->channel[READ_CHANNEL];

       CLAW_DBF_TEXT(4, trace, "StRdNter");
        p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
        p_clawh->flag=CLAW_IDLE;    /* 0x00 */

        /* Signal BUSY while any read or write CCW is still outstanding. */
        if ((privptr->p_write_active_first!=NULL &&
             privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
            (privptr->p_read_active_first!=NULL &&
             privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
                p_clawh->flag=CLAW_BUSY;    /* 0xff */
        }
        if (lock==LOCK_YES) {
                spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
        }
        /* Only start I/O if no read is currently active on the channel. */
        if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
               CLAW_DBF_TEXT(4, trace, "HotRead");
                p_ccwbk=privptr->p_read_active_first;
                parm = (unsigned long) p_ch;
                rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
                                      0xff, 0);
                if (rc != 0) {
                        ccw_check_return_code(p_ch->cdev, rc);
                }
        }
       else {
               CLAW_DBF_TEXT(2, trace, "ReadAct");
       }

        if (lock==LOCK_YES) {
                spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
        }
       CLAW_DBF_TEXT(4, trace, "StRdExit");
        return;
}       /*    end of claw_strt_read    */
-
-/*-------------------------------------------------------------------*
-*       claw_strt_out_IO                                             *
-*                                                                    *
-*--------------------------------------------------------------------*/
-
/*
 * claw_strt_out_IO() - start the write channel program for queued output.
 * @dev: net device; a NULL device is tolerated and ignored
 *
 * Starts ccw I/O on the first active write CCW unless the channel is
 * stopped, the write queue is empty, or a write is already in flight
 * (IO_active latch).  Also refreshes the netdev watchdog timestamp.
 */
static void
claw_strt_out_IO( struct net_device *dev )
{
        int                    rc = 0;
        unsigned long          parm;
        struct claw_privbk     *privptr;
        struct chbk            *p_ch;
        struct ccwbk           *p_first_ccw;

       if (!dev) {
               return;
       }
       privptr = (struct claw_privbk *)dev->ml_priv;
       p_ch = &privptr->channel[WRITE_CHANNEL];

       CLAW_DBF_TEXT(4, trace, "strt_io");
        p_first_ccw=privptr->p_write_active_first;

        if (p_ch->claw_state == CLAW_STOP)
                return;
        if (p_first_ccw == NULL) {
                return;
        }
        /* IO_active acts as a "write already in flight" latch. */
        if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
                parm = (unsigned long) p_ch;
               CLAW_DBF_TEXT(2, trace, "StWrtIO");
               rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
                                     0xff, 0);
                if (rc != 0) {
                        ccw_check_return_code(p_ch->cdev, rc);
                }
        }
        /* Record transmit start time for the netdev watchdog. */
        dev->trans_start = jiffies;
        return;
}       /*    end of claw_strt_out_IO    */
-
-/*-------------------------------------------------------------------*
-*       Free write buffers                                           *
-*                                                                    *
-*--------------------------------------------------------------------*/
-
/*
 * claw_free_wrt_buf() - reclaim completed write CCW blocks.
 * @dev: net device whose write queue is scanned
 *
 * Moves every completed CCW at the head of the active write queue back
 * onto the free chain, updating tx statistics.  Clears the NOBUFFER
 * busy bit once at least one free buffer exists, and resets the
 * active-last pointer when the whole chain has drained.
 */
static void
claw_free_wrt_buf( struct net_device *dev )
{

       struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
       struct ccwbk*p_this_ccw;
       struct ccwbk*p_next_ccw;

       CLAW_DBF_TEXT(4, trace, "freewrtb");
        /*  scan the write queue to free any completed write packets   */
        p_this_ccw=privptr->p_write_active_first;
        while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
        {
                p_next_ccw = p_this_ccw->next;
                /* Free this CCW only if the one after it has also
                 * completed, or it is the last one on the queue. */
                if (((p_next_ccw!=NULL) &&
                    (p_next_ccw->header.flag!=CLAW_PENDING)) ||
                    ((p_this_ccw == privptr->p_write_active_last) &&
                     (p_this_ccw->header.flag!=CLAW_PENDING))) {
                        /* The next CCW is OK or this is  */
                       /* the last CCW...free it   @A1A  */
                        privptr->p_write_active_first=p_this_ccw->next;
                       p_this_ccw->header.flag=CLAW_PENDING;
                        p_this_ccw->next=privptr->p_write_free_chain;
                       privptr->p_write_free_chain=p_this_ccw;
                        ++privptr->write_free_count;
                       privptr->stats.tx_bytes+= p_this_ccw->write.count;
                       p_this_ccw=privptr->p_write_active_first;
                        privptr->stats.tx_packets++;
                }
                else {
                       break;
                }
        }
        /* Transmit buffers are available again. */
        if (privptr->write_free_count!=0) {
                claw_clearbit_busy(TB_NOBUFFER,dev);
        }
        /*   whole chain removed?   */
        if (privptr->p_write_active_first==NULL) {
                privptr->p_write_active_last=NULL;
        }
       CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
        return;
}
-
-/*-------------------------------------------------------------------*
-*       claw free netdevice                                          *
-*                                                                    *
-*--------------------------------------------------------------------*/
-static void
-claw_free_netdevice(struct net_device * dev, int free_dev)
-{
-       struct claw_privbk *privptr;
-
-       CLAW_DBF_TEXT(2, setup, "free_dev");
-       if (!dev)
-               return;
-       CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
-       privptr = dev->ml_priv;
-       if (dev->flags & IFF_RUNNING)
-               claw_release(dev);
-       if (privptr) {
-               privptr->channel[READ_CHANNEL].ndev = NULL;  /* say it's free */
-       }
-       dev->ml_priv = NULL;
-#ifdef MODULE
-       if (free_dev) {
-               free_netdev(dev);
-       }
-#endif
-       CLAW_DBF_TEXT(2, setup, "free_ok");
-}
-
-/**
- * Claw init netdevice
- * Initialize everything of the net device except the name and the
- * channel structs.
- */
/* Net-device callbacks for CLAW interfaces; installed on each device
 * by claw_init_netdevice(). */
static const struct net_device_ops claw_netdev_ops = {
       .ndo_open               = claw_open,
       .ndo_stop               = claw_release,
       .ndo_get_stats          = claw_stats,
       .ndo_start_xmit         = claw_tx,
       .ndo_change_mtu         = claw_change_mtu,
};
-
/*
 * claw_init_netdevice() - alloc_netdev() setup callback.
 * @dev: freshly allocated net device
 *
 * Initializes everything of the net device except the name and the
 * channel structs: a SLIP-like point-to-point, no-ARP device with the
 * CLAW callbacks installed.
 */
static void
claw_init_netdevice(struct net_device * dev)
{
       CLAW_DBF_TEXT(2, setup, "init_dev");
       CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
       dev->mtu = CLAW_DEFAULT_MTU_SIZE;
       dev->hard_header_len = 0;   /* no link-level header */
       dev->addr_len = 0;          /* no hardware address */
       dev->type = ARPHRD_SLIP;
       dev->tx_queue_len = 1300;
       dev->flags = IFF_POINTOPOINT | IFF_NOARP;
       dev->netdev_ops = &claw_netdev_ops;
       CLAW_DBF_TEXT(2, setup, "initok");
       return;
}
-
-/**
- * Init a new channel in the privptr->channel[i].
- *
- * @param cdev  The ccw_device to be added.
- *
- * @return 0 on success, !0 on error.
- */
-static int
-add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
-{
-       struct chbk *p_ch;
-       struct ccw_dev_id dev_id;
-
-       CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
-       privptr->channel[i].flag  = i+1;   /* Read is 1 Write is 2 */
-       p_ch = &privptr->channel[i];
-       p_ch->cdev = cdev;
-       snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
-       ccw_device_get_id(cdev, &dev_id);
-       p_ch->devno = dev_id.devno;
-       if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
-               return -ENOMEM;
-       }
-       return 0;
-}
-
-
-/**
- *
- * Setup an interface.
- *
- * @param cgdev  Device to be setup.
- *
- * @returns 0 on success, !0 on failure.
- */
/*
 * claw_new_device() - set up an interface for a ccwgroup device.
 * @cgdev: device to be set up
 *
 * Initializes both channels, brings the read and write subchannels
 * online, allocates and registers the net device and wires it to the
 * channels.  Returns 0 on success, -ENODEV on any failure (after
 * taking both subchannels offline again).
 */
static int
claw_new_device(struct ccwgroup_device *cgdev)
{
       struct claw_privbk *privptr;
       struct claw_env *p_env;
       struct net_device *dev;
       int ret;
       struct ccw_dev_id dev_id;

       dev_info(&cgdev->dev, "add for %s\n",
                dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
       CLAW_DBF_TEXT(2, setup, "new_dev");
       privptr = dev_get_drvdata(&cgdev->dev);
       /* NOTE(review): drvdata for both subchannels is set again further
        * below after the netdev is allocated -- this first pair looks
        * redundant; confirm before removing. */
       dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
       dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
       if (!privptr)
               return -ENODEV;
       p_env = privptr->p_env;
       ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
       p_env->devno[READ_CHANNEL] = dev_id.devno;
       ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
       p_env->devno[WRITE_CHANNEL] = dev_id.devno;
       ret = add_channel(cgdev->cdev[0],0,privptr);
       if (ret == 0)
               ret = add_channel(cgdev->cdev[1],1,privptr);
       if (ret != 0) {
               dev_warn(&cgdev->dev, "Creating a CLAW group device"
                       " failed with error code %d\n", ret);
               goto out;
       }
       ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
       if (ret != 0) {
               dev_warn(&cgdev->dev,
                       "Setting the read subchannel online"
                       " failed with error code %d\n", ret);
               goto out;
       }
       ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
       if (ret != 0) {
               dev_warn(&cgdev->dev,
                       "Setting the write subchannel online "
                       "failed with error code %d\n", ret);
               goto out;
       }
       dev = alloc_netdev(0, "claw%d", NET_NAME_UNKNOWN, claw_init_netdevice);
       if (!dev) {
               dev_warn(&cgdev->dev,
                       "Activating the CLAW device failed\n");
               goto out;
       }
       dev->ml_priv = privptr;
       dev_set_drvdata(&cgdev->dev, privptr);
       dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
       dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
       /* sysfs magic */
        SET_NETDEV_DEV(dev, &cgdev->dev);
       if (register_netdev(dev) != 0) {
               claw_free_netdevice(dev, 1);
               CLAW_DBF_TEXT(2, trace, "regfail");
               goto out;
       }
       dev->flags &=~IFF_RUNNING;
       if (privptr->buffs_alloc == 0) {
               ret=init_ccw_bk(dev);
               if (ret !=0) {
                       unregister_netdev(dev);
                       claw_free_netdevice(dev,1);
                       CLAW_DBF_TEXT(2, trace, "ccwmem");
                       goto out;
               }
       }
       privptr->channel[READ_CHANNEL].ndev = dev;
       privptr->channel[WRITE_CHANNEL].ndev = dev;
       privptr->p_env->ndev = dev;

       dev_info(&cgdev->dev, "%s:readsize=%d  writesize=%d "
               "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
                dev->name, p_env->read_size,
               p_env->write_size, p_env->read_buffers,
               p_env->write_buffers, p_env->devno[READ_CHANNEL],
               p_env->devno[WRITE_CHANNEL]);
       dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
               ":%.8s api_type: %.8s\n",
                dev->name, p_env->host_name,
               p_env->adapter_name , p_env->api_type);
       return 0;
out:
       /* NOTE(review): both subchannels are taken offline here even on
        * paths where they were never set online -- presumably harmless,
        * but verify ccw_device_set_offline tolerates that. */
       ccw_device_set_offline(cgdev->cdev[1]);
       ccw_device_set_offline(cgdev->cdev[0]);
       return -ENODEV;
}
-
-static void
-claw_purge_skb_queue(struct sk_buff_head *q)
-{
-        struct sk_buff *skb;
-
-       CLAW_DBF_TEXT(4, trace, "purgque");
-        while ((skb = skb_dequeue(q))) {
-                atomic_dec(&skb->users);
-                dev_kfree_skb_any(skb);
-        }
-}
-
-/**
- * Shutdown an interface.
- *
- * @param cgdev  Device to be shut down.
- *
- * @returns 0 on success, !0 on failure.
- */
-static int
-claw_shutdown_device(struct ccwgroup_device *cgdev)
-{
-       struct claw_privbk *priv;
-       struct net_device *ndev;
-       int ret = 0;
-
-       CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
-       priv = dev_get_drvdata(&cgdev->dev);
-       if (!priv)
-               return -ENODEV;
-       ndev = priv->channel[READ_CHANNEL].ndev;
-       if (ndev) {
-               /* Close the device */
-               dev_info(&cgdev->dev, "%s: shutting down\n",
-                       ndev->name);
-               if (ndev->flags & IFF_RUNNING)
-                       ret = claw_release(ndev);
-               ndev->flags &=~IFF_RUNNING;
-               unregister_netdev(ndev);
-               ndev->ml_priv = NULL;  /* cgdev data, not ndev's to free */
-               claw_free_netdevice(ndev, 1);
-               priv->channel[READ_CHANNEL].ndev = NULL;
-               priv->channel[WRITE_CHANNEL].ndev = NULL;
-               priv->p_env->ndev = NULL;
-       }
-       ccw_device_set_offline(cgdev->cdev[1]);
-       ccw_device_set_offline(cgdev->cdev[0]);
-       return ret;
-}
-
-static void
-claw_remove_device(struct ccwgroup_device *cgdev)
-{
-       struct claw_privbk *priv;
-
-       CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
-       priv = dev_get_drvdata(&cgdev->dev);
-       dev_info(&cgdev->dev, " will be removed.\n");
-       if (cgdev->state == CCWGROUP_ONLINE)
-               claw_shutdown_device(cgdev);
-       kfree(priv->p_mtc_envelope);
-       priv->p_mtc_envelope=NULL;
-       kfree(priv->p_env);
-       priv->p_env=NULL;
-       kfree(priv->channel[0].irb);
-       priv->channel[0].irb=NULL;
-       kfree(priv->channel[1].irb);
-       priv->channel[1].irb=NULL;
-       kfree(priv);
-       dev_set_drvdata(&cgdev->dev, NULL);
-       dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
-       dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
-       put_device(&cgdev->dev);
-
-       return;
-}
-
-
-/*
- * sysfs attributes
- */
-static ssize_t
-claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct claw_privbk *priv;
-       struct claw_env *  p_env;
-
-       priv = dev_get_drvdata(dev);
-       if (!priv)
-               return -ENODEV;
-       p_env = priv->p_env;
-       return sprintf(buf, "%s\n",p_env->host_name);
-}
-
-static ssize_t
-claw_hname_write(struct device *dev, struct device_attribute *attr,
-        const char *buf, size_t count)
-{
-       struct claw_privbk *priv;
-       struct claw_env *  p_env;
-
-       priv = dev_get_drvdata(dev);
-       if (!priv)
-               return -ENODEV;
-       p_env = priv->p_env;
-       if (count > MAX_NAME_LEN+1)
-               return -EINVAL;
-       memset(p_env->host_name, 0x20, MAX_NAME_LEN);
-       strncpy(p_env->host_name,buf, count);
-       p_env->host_name[count-1] = 0x20;  /* clear extra 0x0a */
-       p_env->host_name[MAX_NAME_LEN] = 0x00;
-       CLAW_DBF_TEXT(2, setup, "HstnSet");
-       CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
-
-       return count;
-}
-
-static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
-
-static ssize_t
-claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct claw_privbk *priv;
-       struct claw_env *  p_env;
-
-       priv = dev_get_drvdata(dev);
-       if (!priv)
-               return -ENODEV;
-       p_env = priv->p_env;
-       return sprintf(buf, "%s\n", p_env->adapter_name);
-}
-
-static ssize_t
-claw_adname_write(struct device *dev, struct device_attribute *attr,
-        const char *buf, size_t count)
-{
-       struct claw_privbk *priv;
-       struct claw_env *  p_env;
-
-       priv = dev_get_drvdata(dev);
-       if (!priv)
-               return -ENODEV;
-       p_env = priv->p_env;
-       if (count > MAX_NAME_LEN+1)
-               return -EINVAL;
-       memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
-       strncpy(p_env->adapter_name,buf, count);
-       p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
-       p_env->adapter_name[MAX_NAME_LEN] = 0x00;
-       CLAW_DBF_TEXT(2, setup, "AdnSet");
-       CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
-
-       return count;
-}
-
-static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
-
-static ssize_t
-claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct claw_privbk *priv;
-       struct claw_env *  p_env;
-
-       priv = dev_get_drvdata(dev);
-       if (!priv)
-               return -ENODEV;
-       p_env = priv->p_env;
-       return sprintf(buf, "%s\n",
-                      p_env->api_type);
-}
-
-static ssize_t
-claw_apname_write(struct device *dev, struct device_attribute *attr,
-       const char *buf, size_t count)
-{
-       struct claw_privbk *priv;
-       struct claw_env *  p_env;
-
-       priv = dev_get_drvdata(dev);
-       if (!priv)
-               return -ENODEV;
-       p_env = priv->p_env;
-       if (count > MAX_NAME_LEN+1)
-               return -EINVAL;
-       memset(p_env->api_type, 0x20, MAX_NAME_LEN);
-       strncpy(p_env->api_type,buf, count);
-       p_env->api_type[count-1] = 0x20;  /* we get a loose 0x0a */
-       p_env->api_type[MAX_NAME_LEN] = 0x00;
-       if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
-               p_env->read_size=DEF_PACK_BUFSIZE;
-               p_env->write_size=DEF_PACK_BUFSIZE;
-               p_env->packing=PACKING_ASK;
-               CLAW_DBF_TEXT(2, setup, "PACKING");
-       }
-       else {
-               p_env->packing=0;
-               p_env->read_size=CLAW_FRAME_SIZE;
-               p_env->write_size=CLAW_FRAME_SIZE;
-               CLAW_DBF_TEXT(2, setup, "ApiSet");
-       }
-       CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
-       return count;
-}
-
-static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
-
-static ssize_t
-claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct claw_privbk *priv;
-       struct claw_env * p_env;
-
-       priv = dev_get_drvdata(dev);
-       if (!priv)
-               return -ENODEV;
-       p_env = priv->p_env;
-       return sprintf(buf, "%d\n", p_env->write_buffers);
-}
-
-static ssize_t
-claw_wbuff_write(struct device *dev, struct device_attribute *attr,
-       const char *buf, size_t count)
-{
-       struct claw_privbk *priv;
-       struct claw_env *  p_env;
-       int nnn,max;
-
-       priv = dev_get_drvdata(dev);
-       if (!priv)
-               return -ENODEV;
-       p_env = priv->p_env;
-       sscanf(buf, "%i", &nnn);
-       if (p_env->packing) {
-               max = 64;
-       }
-       else {
-               max = 512;
-       }
-       if ((nnn > max ) || (nnn < 2))
-               return -EINVAL;
-       p_env->write_buffers = nnn;
-       CLAW_DBF_TEXT(2, setup, "Wbufset");
-       CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
-       return count;
-}
-
-static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
-
-static ssize_t
-claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct claw_privbk *priv;
-       struct claw_env *  p_env;
-
-       priv = dev_get_drvdata(dev);
-       if (!priv)
-               return -ENODEV;
-       p_env = priv->p_env;
-       return sprintf(buf, "%d\n", p_env->read_buffers);
-}
-
-static ssize_t
-claw_rbuff_write(struct device *dev, struct device_attribute *attr,
-       const char *buf, size_t count)
-{
-       struct claw_privbk *priv;
-       struct claw_env *p_env;
-       int nnn,max;
-
-       priv = dev_get_drvdata(dev);
-       if (!priv)
-               return -ENODEV;
-       p_env = priv->p_env;
-       sscanf(buf, "%i", &nnn);
-       if (p_env->packing) {
-               max = 64;
-       }
-       else {
-               max = 512;
-       }
-       if ((nnn > max ) || (nnn < 2))
-               return -EINVAL;
-       p_env->read_buffers = nnn;
-       CLAW_DBF_TEXT(2, setup, "Rbufset");
-       CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
-       return count;
-}
-static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
-
-static struct attribute *claw_attr[] = {
-       &dev_attr_read_buffer.attr,
-       &dev_attr_write_buffer.attr,
-       &dev_attr_adapter_name.attr,
-       &dev_attr_api_type.attr,
-       &dev_attr_host_name.attr,
-       NULL,
-};
-static struct attribute_group claw_attr_group = {
-       .attrs = claw_attr,
-};
-static const struct attribute_group *claw_attr_groups[] = {
-       &claw_attr_group,
-       NULL,
-};
-static const struct device_type claw_devtype = {
-       .name = "claw",
-       .groups = claw_attr_groups,
-};
-
-/*----------------------------------------------------------------*
- *   claw_probe                                                  *
- *     this function is called for each CLAW device.             *
- *----------------------------------------------------------------*/
-static int claw_probe(struct ccwgroup_device *cgdev)
-{
-       struct claw_privbk *privptr = NULL;
-
-       CLAW_DBF_TEXT(2, setup, "probe");
-       if (!get_device(&cgdev->dev))
-               return -ENODEV;
-       privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
-       dev_set_drvdata(&cgdev->dev, privptr);
-       if (privptr == NULL) {
-               probe_error(cgdev);
-               put_device(&cgdev->dev);
-               CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
-               return -ENOMEM;
-       }
-       privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
-       privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
-       if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
-               probe_error(cgdev);
-               put_device(&cgdev->dev);
-               CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
-               return -ENOMEM;
-       }
-       memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
-       memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
-       memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
-       privptr->p_env->packing = 0;
-       privptr->p_env->write_buffers = 5;
-       privptr->p_env->read_buffers = 5;
-       privptr->p_env->read_size = CLAW_FRAME_SIZE;
-       privptr->p_env->write_size = CLAW_FRAME_SIZE;
-       privptr->p_env->p_priv = privptr;
-       cgdev->cdev[0]->handler = claw_irq_handler;
-       cgdev->cdev[1]->handler = claw_irq_handler;
-       cgdev->dev.type = &claw_devtype;
-       CLAW_DBF_TEXT(2, setup, "prbext 0");
-
-       return 0;
-}  /*  end of claw_probe       */
-
-/*--------------------------------------------------------------------*
-*    claw_init  and cleanup                                           *
-*---------------------------------------------------------------------*/
-
-static void __exit claw_cleanup(void)
-{
-       ccwgroup_driver_unregister(&claw_group_driver);
-       ccw_driver_unregister(&claw_ccw_driver);
-       root_device_unregister(claw_root_dev);
-       claw_unregister_debug_facility();
-       pr_info("Driver unloaded\n");
-}
-
-/**
- * Initialize module.
- * This is called just after the module is loaded.
- *
- * @return 0 on success, !0 on error.
- */
-static int __init claw_init(void)
-{
-       int ret = 0;
-
-       pr_info("Loading %s\n", version);
-       ret = claw_register_debug_facility();
-       if (ret) {
-               pr_err("Registering with the S/390 debug feature"
-                       " failed with error code %d\n", ret);
-               goto out_err;
-       }
-       CLAW_DBF_TEXT(2, setup, "init_mod");
-       claw_root_dev = root_device_register("claw");
-       ret = PTR_ERR_OR_ZERO(claw_root_dev);
-       if (ret)
-               goto register_err;
-       ret = ccw_driver_register(&claw_ccw_driver);
-       if (ret)
-               goto ccw_err;
-       claw_group_driver.driver.groups = claw_drv_attr_groups;
-       ret = ccwgroup_driver_register(&claw_group_driver);
-       if (ret)
-               goto ccwgroup_err;
-       return 0;
-
-ccwgroup_err:
-       ccw_driver_unregister(&claw_ccw_driver);
-ccw_err:
-       root_device_unregister(claw_root_dev);
-register_err:
-       CLAW_DBF_TEXT(2, setup, "init_bad");
-       claw_unregister_debug_facility();
-out_err:
-       pr_err("Initializing the claw device driver failed\n");
-       return ret;
-}
-
-module_init(claw_init);
-module_exit(claw_cleanup);
-
-MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
-MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
-                       "Copyright IBM Corp. 2000, 2008\n");
-MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
deleted file mode 100644 (file)
index 3339b9b..0000000
+++ /dev/null
@@ -1,348 +0,0 @@
-/*******************************************************
-*  Define constants                                    *
-*                                                      *
-********************************************************/
-
-/*-----------------------------------------------------*
-*     CCW command codes for CLAW protocol              *
-*------------------------------------------------------*/
-
-#define CCW_CLAW_CMD_WRITE           0x01      /* write - not including link */
-#define CCW_CLAW_CMD_READ            0x02      /* read */
-#define CCW_CLAW_CMD_NOP             0x03      /* NOP */
-#define CCW_CLAW_CMD_SENSE           0x04      /* Sense */
-#define CCW_CLAW_CMD_SIGNAL_SMOD     0x05      /* Signal Status Modifier */
-#define CCW_CLAW_CMD_TIC             0x08      /* TIC */
-#define CCW_CLAW_CMD_READHEADER      0x12      /* read header data */
-#define CCW_CLAW_CMD_READFF          0x22      /* read an FF */
-#define CCW_CLAW_CMD_SENSEID         0xe4      /* Sense ID */
-
-
-/*-----------------------------------------------------*
-*    CLAW Unique constants                             *
-*------------------------------------------------------*/
-
-#define MORE_to_COME_FLAG       0x04   /* OR with write CCW in case of m-t-c */
-#define CLAW_IDLE               0x00   /* flag to indicate CLAW is idle */
-#define CLAW_BUSY               0xff   /* flag to indicate CLAW is busy */
-#define CLAW_PENDING            0x00   /* flag to indicate i/o is pending */
-#define CLAW_COMPLETE           0xff   /* flag to indicate i/o completed */
-
-/*-----------------------------------------------------*
-*     CLAW control command code                        *
-*------------------------------------------------------*/
-
-#define SYSTEM_VALIDATE_REQUEST   0x01  /* System Validate request */
-#define SYSTEM_VALIDATE_RESPONSE  0x02  /* System Validate response */
-#define CONNECTION_REQUEST        0x21  /* Connection request */
-#define CONNECTION_RESPONSE       0x22  /* Connection response */
-#define CONNECTION_CONFIRM        0x23  /* Connection confirm */
-#define DISCONNECT                0x24  /* Disconnect */
-#define CLAW_ERROR                0x41  /* CLAW error message */
-#define CLAW_VERSION_ID           2     /* CLAW version ID */
-
-/*-----------------------------------------------------*
-*  CLAW adater sense bytes                             *
-*------------------------------------------------------*/
-
-#define CLAW_ADAPTER_SENSE_BYTE 0x41   /* Stop command issued to adapter */
-
-/*-----------------------------------------------------*
-*      CLAW control command return codes               *
-*------------------------------------------------------*/
-
-#define CLAW_RC_NAME_MISMATCH       166  /*  names do not match */
-#define CLAW_RC_WRONG_VERSION       167  /*  wrong CLAW version number */
-#define CLAW_RC_HOST_RCV_TOO_SMALL  180  /*  Host maximum receive is   */
-                                        /*  less than Linux on zSeries*/
-                                         /*  transmit size             */
-
-/*-----------------------------------------------------*
-*      CLAW Constants application name                 *
-*------------------------------------------------------*/
-
-#define HOST_APPL_NAME          "TCPIP   "
-#define WS_APPL_NAME_IP_LINK    "TCPIP   "
-#define WS_APPL_NAME_IP_NAME   "IP      "
-#define WS_APPL_NAME_API_LINK   "API     "
-#define WS_APPL_NAME_PACKED     "PACKED  "
-#define WS_NAME_NOT_DEF         "NOT_DEF "
-#define PACKING_ASK            1
-#define PACK_SEND              2
-#define DO_PACKED              3
-
-#define MAX_ENVELOPE_SIZE       65536
-#define CLAW_DEFAULT_MTU_SIZE   4096
-#define DEF_PACK_BUFSIZE       32768
-#define READ_CHANNEL           0
-#define WRITE_CHANNEL          1
-
-#define TB_TX                   0          /* sk buffer handling in process  */
-#define TB_STOP                 1          /* network device stop in process */
-#define TB_RETRY                2          /* retry in process               */
-#define TB_NOBUFFER             3          /* no buffer on free queue        */
-#define CLAW_MAX_LINK_ID        1
-#define CLAW_MAX_DEV            256        /*      max claw devices          */
-#define MAX_NAME_LEN            8          /* host name, adapter name length */
-#define CLAW_FRAME_SIZE         4096
-#define CLAW_ID_SIZE           20+3
-
-/* state machine codes used in claw_irq_handler */
-
-#define CLAW_STOP                0
-#define CLAW_START_HALT_IO       1
-#define CLAW_START_SENSEID       2
-#define CLAW_START_READ          3
-#define CLAW_START_WRITE         4
-
-/*-----------------------------------------------------*
-*    Lock flag                                         *
-*------------------------------------------------------*/
-#define LOCK_YES             0
-#define LOCK_NO              1
-
-/*-----------------------------------------------------*
-*    DBF Debug macros                                  *
-*------------------------------------------------------*/
-#define CLAW_DBF_TEXT(level, name, text) \
-       do { \
-               debug_text_event(claw_dbf_##name, level, text); \
-       } while (0)
-
-#define CLAW_DBF_HEX(level,name,addr,len) \
-do { \
-       debug_event(claw_dbf_##name,level,(void*)(addr),len); \
-} while (0)
-
-#define CLAW_DBF_TEXT_(level,name,text...) \
-       do { \
-               if (debug_level_enabled(claw_dbf_##name, level)) { \
-                       sprintf(debug_buffer, text); \
-                       debug_text_event(claw_dbf_##name, level, \
-                                               debug_buffer); \
-               } \
-       } while (0)
-
-/**
- * Enum for classifying detected devices.
- */
-enum claw_channel_types {
-       /* Device is not a channel  */
-       claw_channel_type_none,
-
-       /* Device is a CLAW channel device */
-       claw_channel_type_claw
-};
-
-
-/*******************************************************
-*  Define Control Blocks                               *
-*                                                      *
-********************************************************/
-
-/*------------------------------------------------------*/
-/*     CLAW header                                      */
-/*------------------------------------------------------*/
-
-struct clawh {
-        __u16  length;     /* length of data read by preceding read CCW */
-        __u8   opcode;     /* equivalent read CCW */
-        __u8   flag;       /* flag of FF to indicate read was completed */
-};
-
-/*------------------------------------------------------*/
-/*     CLAW Packing header   4 bytes                    */
-/*------------------------------------------------------*/
-struct clawph {
-       __u16 len;      /* Length of Packed Data Area   */
-       __u8  flag;     /* Reserved not used            */
-       __u8  link_num; /* Link ID                      */
-};
-
-/*------------------------------------------------------*/
-/*     CLAW Ending struct ccwbk                         */
-/*------------------------------------------------------*/
-struct endccw {
-       __u32     real;            /* real address of this block */
-       __u8      write1;          /* write 1 is active */
-        __u8      read1;           /* read 1 is active  */
-        __u16     reserved;        /* reserved for future use */
-        struct ccw1    write1_nop1;
-        struct ccw1    write1_nop2;
-        struct ccw1    write2_nop1;
-        struct ccw1    write2_nop2;
-        struct ccw1    read1_nop1;
-        struct ccw1    read1_nop2;
-        struct ccw1    read2_nop1;
-        struct ccw1    read2_nop2;
-};
-
-/*------------------------------------------------------*/
-/*     CLAW struct ccwbk                                       */
-/*------------------------------------------------------*/
-struct ccwbk {
-        void   *next;        /* pointer to next ccw block */
-        __u32     real;         /* real address of this ccw */
-        void      *p_buffer;    /* virtual address of data */
-        struct clawh     header;       /* claw header */
-        struct ccw1    write;   /* write CCW    */
-        struct ccw1    w_read_FF; /* read FF */
-        struct ccw1    w_TIC_1;        /* TIC */
-        struct ccw1    read;         /* read CCW  */
-        struct ccw1    read_h;        /* read header */
-        struct ccw1    signal;       /* signal SMOD  */
-        struct ccw1    r_TIC_1;        /* TIC1 */
-        struct ccw1    r_read_FF;      /* read FF  */
-        struct ccw1    r_TIC_2;        /* TIC2 */
-};
-
-/*------------------------------------------------------*/
-/*     CLAW control block                               */
-/*------------------------------------------------------*/
-struct clawctl {
-        __u8    command;      /* control command */
-        __u8    version;      /* CLAW protocol version */
-        __u8    linkid;       /* link ID   */
-        __u8    correlator;   /* correlator */
-        __u8    rc;           /* return code */
-        __u8    reserved1;    /* reserved */
-        __u8    reserved2;    /* reserved */
-        __u8    reserved3;    /* reserved */
-        __u8    data[24];     /* command specific fields */
-};
-
-/*------------------------------------------------------*/
-/*     Data for SYSTEMVALIDATE command                  */
-/*------------------------------------------------------*/
-struct sysval  {
-        char    WS_name[8];        /* Workstation System name  */
-        char    host_name[8];      /* Host system name     */
-        __u16   read_frame_size;   /* read frame size */
-        __u16   write_frame_size;  /* write frame size */
-        __u8    reserved[4];       /* reserved */
-};
-
-/*------------------------------------------------------*/
-/*     Data for Connect command                         */
-/*------------------------------------------------------*/
-struct conncmd  {
-        char     WS_name[8];       /* Workstation application name  */
-        char     host_name[8];     /* Host application name      */
-        __u16    reserved1[2];     /* read frame size */
-        __u8     reserved2[4];     /* reserved  */
-};
-
-/*------------------------------------------------------*/
-/*     Data for CLAW error                              */
-/*------------------------------------------------------*/
-struct clawwerror  {
-        char      reserved1[8];   /* reserved */
-        char      reserved2[8];   /* reserved  */
-        char      reserved3[8];   /* reserved  */
-};
-
-/*------------------------------------------------------*/
-/*     Data buffer for CLAW                             */
-/*------------------------------------------------------*/
-struct clawbuf  {
-       char      buffer[MAX_ENVELOPE_SIZE];   /* data buffer */
-};
-
-/*------------------------------------------------------*/
-/*     Channel control block for read and write channel */
-/*------------------------------------------------------*/
-
-struct chbk {
-        unsigned int        devno;
-        int                 irq;
-       char                id[CLAW_ID_SIZE];
-       __u32               IO_active;
-        __u8                claw_state;
-        struct irb          *irb;
-               struct ccw_device   *cdev;  /* pointer to the channel device */
-       struct net_device   *ndev;
-        wait_queue_head_t   wait;
-        struct tasklet_struct    tasklet;
-        struct timer_list   timer;
-        unsigned long       flag_a;    /* atomic flags */
-#define CLAW_BH_ACTIVE      0
-        unsigned long       flag_b;    /* atomic flags */
-#define CLAW_WRITE_ACTIVE   0
-        __u8                last_dstat;
-        __u8                flag;
-       struct sk_buff_head collect_queue;
-       spinlock_t collect_lock;
-#define CLAW_WRITE      0x02      /* - Set if this is a write channel */
-#define CLAW_READ      0x01      /* - Set if this is a read channel  */
-#define CLAW_TIMER      0x80      /* - Set if timer made the wake_up  */
-};
-
-/*--------------------------------------------------------------*
-*           CLAW  environment block                             *
-*---------------------------------------------------------------*/
-
-struct claw_env {
-        unsigned int            devno[2];       /* device number */
-        char                    host_name[9];   /* Host name */
-        char                    adapter_name [9]; /* adapter name */
-        char                    api_type[9];    /* TCPIP, API or PACKED */
-        void                    *p_priv;        /* privptr */
-        __u16                   read_buffers;   /* read buffer number */
-        __u16                   write_buffers;  /* write buffer number */
-        __u16                   read_size;      /* read buffer size */
-        __u16                   write_size;     /* write buffer size */
-        __u16                   dev_id;         /* device ident */
-       __u8                    packing;        /* are we packing? */
-        __u8                    in_use;         /* device active flag */
-        struct net_device       *ndev;         /* backward ptr to the net dev*/
-};
-
-/*--------------------------------------------------------------*
-*           CLAW  main control block                            *
-*---------------------------------------------------------------*/
-
-struct claw_privbk {
-        void *p_buff_ccw;
-        __u32      p_buff_ccw_num;
-        void  *p_buff_read;
-        __u32      p_buff_read_num;
-        __u32      p_buff_pages_perread;
-        void  *p_buff_write;
-        __u32      p_buff_write_num;
-        __u32      p_buff_pages_perwrite;
-        long       active_link_ID;           /* Active logical link ID */
-        struct ccwbk *p_write_free_chain;     /* pointer to free ccw chain */
-        struct ccwbk *p_write_active_first;   /* ptr to the first write ccw */
-        struct ccwbk *p_write_active_last;    /* ptr to the last write ccw */
-        struct ccwbk *p_read_active_first;    /* ptr to the first read ccw */
-        struct ccwbk *p_read_active_last;     /* ptr to the last read ccw */
-        struct endccw *p_end_ccw;              /*ptr to ending ccw */
-        struct ccwbk *p_claw_signal_blk;      /* ptr to signal block */
-        __u32      write_free_count;       /* number of free bufs for write */
-       struct     net_device_stats  stats; /*   device status    */
-        struct chbk channel[2];            /* Channel control blocks */
-        __u8       mtc_skipping;
-        int        mtc_offset;
-        int        mtc_logical_link;
-        void       *p_mtc_envelope;
-       struct     sk_buff      *pk_skb;        /* packing buffer    */
-       int        pk_cnt;
-        struct clawctl ctl_bk;
-        struct claw_env *p_env;
-        __u8       system_validate_comp;
-        __u8       release_pend;
-        __u8      checksum_received_ip_pkts;
-       __u8      buffs_alloc;
-        struct endccw  end_ccw;
-        unsigned long  tbusy;
-
-};
-
-
-/************************************************************/
-/* define global constants                                  */
-/************************************************************/
-
-#define CCWBK_SIZE sizeof(struct ccwbk)
-
-
index 642c77c76b8432d07534e393335353cfbc27233a..3466d3cb76474c2092cdd1d679d3a7704e793984 100644 (file)
@@ -4218,7 +4218,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
        QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
-                       sizeof(struct qeth_ipacmd_setadpparms));
+                       sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8);
        if (!iob)
                return;
        cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
@@ -4290,7 +4290,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
        QETH_CARD_TEXT(card, 4, "chgmac");
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
-                                  sizeof(struct qeth_ipacmd_setadpparms));
+                                  sizeof(struct qeth_ipacmd_setadpparms_hdr) +
+                                  sizeof(struct qeth_change_addr));
        if (!iob)
                return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
index d9631e15f7b5c1a389a87b0687a61351d71cf625..dbe416ff46c270f7fd118c7cc2b0b07cfb451eba 100644 (file)
@@ -1172,7 +1172,7 @@ static struct pci_error_handlers csio_err_handler = {
  *  Macros needed to support the PCI Device ID Table ...
  */
 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
-       static struct pci_device_id csio_pci_tbl[] = {
+       static const struct pci_device_id csio_pci_tbl[] = {
 /* Define for FCoE uses PF6 */
 #define CH_PCI_DEVICE_ID_FUNCTION      0x6
 
index 2270bd51f9c2c240c669e562eb77052f89425a83..9d7b7db75e4b96b6fbb33bf24b91c205add07d79 100644 (file)
@@ -33,7 +33,6 @@ static int sg_version_num = 30536;    /* 2 digits for each component */
 #include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/mm.h>
-#include <linux/aio.h>
 #include <linux/errno.h>
 #include <linux/mtio.h>
 #include <linux/ioctl.h>
@@ -51,6 +50,7 @@ static int sg_version_num = 30536;    /* 2 digits for each component */
 #include <linux/mutex.h>
 #include <linux/atomic.h>
 #include <linux/ratelimit.h>
+#include <linux/uio.h>
 
 #include "scsi.h"
 #include <scsi/scsi_dbg.h>
@@ -1745,17 +1745,14 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
        }
 
        if (iov_count) {
-               int size = sizeof(struct iovec) * iov_count;
-               struct iovec *iov;
+               struct iovec *iov = NULL;
                struct iov_iter i;
 
-               iov = memdup_user(hp->dxferp, size);
-               if (IS_ERR(iov))
-                       return PTR_ERR(iov);
+               res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
+               if (res < 0)
+                       return res;
 
-               iov_iter_init(&i, rw, iov, iov_count,
-                             min_t(size_t, hp->dxfer_len,
-                                   iov_length(iov, iov_count)));
+               iov_iter_truncate(&i, hp->dxfer_len);
 
                res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
                kfree(iov);
index 1e180c400f1721a45c34295d5b7b72f64e722107..a48a7439a2067d4311f81ce625f89ebcea6d1a24 100644 (file)
@@ -1135,6 +1135,8 @@ static u32 ssb_tmslow_reject_bitmask(struct ssb_device *dev)
        case SSB_IDLOW_SSBREV_25:     /* TODO - find the proper REJECT bit */
        case SSB_IDLOW_SSBREV_27:     /* same here */
                return SSB_TMSLOW_REJECT;       /* this is a guess */
+       case SSB_IDLOW_SSBREV:
+               break;
        default:
                WARN(1, KERN_INFO "ssb: Backplane Revision 0x%.8X\n", rev);
        }
index d140b733940cc3ffd8ea567bb286aa4ed50ac8f4..c5c037ccf32cc0097f25fe16ddc7848cbd41e547 100644 (file)
@@ -310,7 +310,7 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
         * be destroyed until all references to the file are dropped and
         * ashmem_release is called.
         */
-       ret = asma->file->f_op->read(asma->file, buf, len, pos);
+       ret = __vfs_read(asma->file, buf, len, pos);
        if (ret >= 0) {
                /** Update backing file pos, since f_ops->read() doesn't */
                asma->file->f_pos = *pos;
index 71226ee9064efc4c829a6b8804ac620df61551ac..a7f170e60633bcd8c70acd685870e37a1045f938 100644 (file)
@@ -110,24 +110,16 @@ static int serial2002_tty_write(struct file *f, unsigned char *buf, int count)
 {
        const char __user *p = (__force const char __user *)buf;
        int result;
+       loff_t offset = 0;
        mm_segment_t oldfs;
 
        oldfs = get_fs();
        set_fs(KERNEL_DS);
-       f->f_pos = 0;
-       result = f->f_op->write(f, p, count, &f->f_pos);
+       result = __vfs_write(f, p, count, &offset);
        set_fs(oldfs);
        return result;
 }
 
-static int serial2002_tty_readb(struct file *f, unsigned char *buf)
-{
-       char __user *p = (__force char __user *)buf;
-
-       f->f_pos = 0;
-       return f->f_op->read(f, p, 1, &f->f_pos);
-}
-
 static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
 {
        struct poll_wqueues table;
@@ -163,13 +155,15 @@ static int serial2002_tty_read(struct file *f, int timeout)
        result = -1;
        if (!IS_ERR(f)) {
                mm_segment_t oldfs;
+               char __user *p = (__force char __user *)&ch;
+               loff_t offset = 0;
 
                oldfs = get_fs();
                set_fs(KERNEL_DS);
                if (f->f_op->poll) {
                        serial2002_tty_read_poll_wait(f, timeout);
 
-                       if (serial2002_tty_readb(f, &ch) == 1)
+                       if (__vfs_read(f, p, 1, &offset) == 1)
                                result = ch;
                } else {
                        /* Device does not support poll, busy wait */
@@ -180,7 +174,7 @@ static int serial2002_tty_read(struct file *f, int timeout)
                                if (retries >= timeout)
                                        break;
 
-                               if (serial2002_tty_readb(f, &ch) == 1) {
+                               if (__vfs_read(f, p, 1, &offset) == 1) {
                                        result = ch;
                                        break;
                                }
index 24183028bd712b11af46cd4531f60b33b4e57338..6d5b38d6957852ee81caa09cb6e3c047c55d26b8 100644 (file)
@@ -38,6 +38,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
 config IIO_SIMPLE_DUMMY_BUFFER
        bool "Buffered capture support"
        select IIO_BUFFER
+       select IIO_TRIGGER
        select IIO_KFIFO_BUF
        help
          Add buffered data capture to the simple dummy driver.
index fd171d8b38fbcc444f3e7118bb66b2d51bdf698c..90cc18b703cf67ae6c089c36d51d8ebde6f28106 100644 (file)
@@ -592,6 +592,7 @@ int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
        mutex_init(&data->lock);
 
        indio_dev->dev.parent = dev;
+       indio_dev->name = dev->driver->name;
        indio_dev->info = &hmc5843_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = data->variant->channels;
index 5ebee6ca0a108330711cd02aefea5f87eaccd771..d73111ef949eee71f8e696b3e800be03f0e2dc1d 100644 (file)
@@ -3128,9 +3128,7 @@ int ll_inode_permission(struct inode *inode, int mask)
 
 /* -o localflock - only provides locally consistent flock locks */
 struct file_operations ll_file_operations = {
-       .read      = new_sync_read,
        .read_iter = ll_file_read_iter,
-       .write    = new_sync_write,
        .write_iter = ll_file_write_iter,
        .unlocked_ioctl = ll_file_ioctl,
        .open      = ll_file_open,
@@ -3143,9 +3141,7 @@ struct file_operations ll_file_operations = {
 };
 
 struct file_operations ll_file_operations_flock = {
-       .read      = new_sync_read,
        .read_iter    = ll_file_read_iter,
-       .write    = new_sync_write,
        .write_iter   = ll_file_write_iter,
        .unlocked_ioctl = ll_file_ioctl,
        .open      = ll_file_open,
@@ -3161,9 +3157,7 @@ struct file_operations ll_file_operations_flock = {
 
 /* These are for -o noflock - to return ENOSYS on flock calls */
 struct file_operations ll_file_operations_noflock = {
-       .read      = new_sync_read,
        .read_iter    = ll_file_read_iter,
-       .write    = new_sync_write,
        .write_iter   = ll_file_write_iter,
        .unlocked_ioctl = ll_file_ioctl,
        .open      = ll_file_open,
index 2af1d7286250a32097ac89d1735e08ae3c97ebad..e7422f5c9c6f408deb97d93583a52fe30ebc20c9 100644 (file)
@@ -938,10 +938,8 @@ struct ll_cl_context {
 };
 
 struct vvp_thread_info {
-       struct iovec     vti_local_iov;
        struct vvp_io_args   vti_args;
        struct ra_io_arg     vti_ria;
-       struct kiocb     vti_kiocb;
        struct ll_cl_context vti_io_ctx;
 };
 
index 2f21304046aa9335e438b36f462be4aa62cbebd3..0d7ce6b0e23ccdf68607db847825a73e4bb2d4cc 100644 (file)
@@ -359,8 +359,8 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
  * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
 #define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
                      ~(DT_MAX_BRW_SIZE - 1))
-static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
-                              struct iov_iter *iter, loff_t file_offset)
+static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
+                              loff_t file_offset)
 {
        struct lu_env *env;
        struct cl_io *io;
@@ -399,7 +399,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
         *    size changing by concurrent truncates and writes.
         * 1. Need inode mutex to operate transient pages.
         */
-       if (rw == READ)
+       if (iov_iter_rw(iter) == READ)
                mutex_lock(&inode->i_mutex);
 
        LASSERT(obj->cob_transient_pages == 0);
@@ -408,7 +408,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                size_t offs;
 
                count = min_t(size_t, iov_iter_count(iter), size);
-               if (rw == READ) {
+               if (iov_iter_rw(iter) == READ) {
                        if (file_offset >= i_size_read(inode))
                                break;
                        if (file_offset + count > i_size_read(inode))
@@ -418,11 +418,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
                if (likely(result > 0)) {
                        int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);
-                       result = ll_direct_IO_26_seg(env, io, rw, inode,
-                                                    file->f_mapping,
-                                                    result, file_offset,
-                                                    pages, n);
-                       ll_free_user_pages(pages, n, rw==READ);
+                       result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter),
+                                                    inode, file->f_mapping,
+                                                    result, file_offset, pages,
+                                                    n);
+                       ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ);
                }
                if (unlikely(result <= 0)) {
                        /* If we can't allocate a large enough buffer
@@ -449,11 +449,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
        }
 out:
        LASSERT(obj->cob_transient_pages == 0);
-       if (rw == READ)
+       if (iov_iter_rw(iter) == READ)
                mutex_unlock(&inode->i_mutex);
 
        if (tot_bytes > 0) {
-               if (rw == WRITE) {
+               if (iov_iter_rw(iter) == WRITE) {
                        struct lov_stripe_md *lsm;
 
                        lsm = ccc_inode_lsm_get(inode);
index 537bd8214efe1deaa0ada916c80e3c13b6603437..a6116fdc8678ec418bc6aa0fe952bb3ee1603efd 100644 (file)
@@ -2580,6 +2580,7 @@ static const struct net_device_ops rtw_cfg80211_monitor_if_ops = {
 };
 
 static int rtw_cfg80211_add_monitor_if(struct rtw_adapter *padapter, char *name,
+                                      unsigned char name_assign_type,
                                       struct net_device **ndev)
 {
        int ret = 0;
@@ -2612,6 +2613,7 @@ static int rtw_cfg80211_add_monitor_if(struct rtw_adapter *padapter, char *name,
        mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP;
        strncpy(mon_ndev->name, name, IFNAMSIZ);
        mon_ndev->name[IFNAMSIZ - 1] = 0;
+       mon_ndev->name_assign_type = name_assign_type;
        mon_ndev->destructor = rtw_ndev_destructor;
 
        mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops;
@@ -2654,6 +2656,7 @@ out:
 
 static struct wireless_dev *
 cfg80211_rtw_add_virtual_intf(struct wiphy *wiphy, const char *name,
+                             unsigned char name_assign_type,
                              enum nl80211_iftype type, u32 *flags,
                              struct vif_params *params)
 {
@@ -2673,7 +2676,8 @@ cfg80211_rtw_add_virtual_intf(struct wiphy *wiphy, const char *name,
                break;
        case NL80211_IFTYPE_MONITOR:
                ret =
-                   rtw_cfg80211_add_monitor_if(padapter, (char *)name, &ndev);
+                   rtw_cfg80211_add_monitor_if(padapter, (char *)name,
+                                               name_assign_type, &ndev);
                break;
 
        case NL80211_IFTYPE_P2P_CLIENT:
index 4019a0d63645fe53380caec301679a68f9979e87..52648d4d99220cc6916cef894299dd4102248097 100644 (file)
@@ -46,7 +46,6 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/fcntl.h>
-#include <linux/aio.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/seq_file.h>
index b1893f3f88f1c6b6fdb2e8e42ad565ba23e18fd1..3ad1458bfeb0fc32afe790b2aa0f3e36961b7b45 100644 (file)
@@ -921,6 +921,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
        writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
                        sport->port.membase + UARTPFIFO);
 
+       /* explicitly clear RDRF */
+       readb(sport->port.membase + UARTSR1);
+
        /* flush Tx and Rx FIFO */
        writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
                        sport->port.membase + UARTCFIFO);
@@ -1076,6 +1079,8 @@ static int lpuart_startup(struct uart_port *port)
        sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) &
                UARTPFIFO_FIFOSIZE_MASK) + 1);
 
+       sport->port.fifosize = sport->txfifo_size;
+
        sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
                UARTPFIFO_FIFOSIZE_MASK) + 1);
 
index af821a9087204ec654f637a617513ba06d68a0ef..cf08876922f1446e55a2d8ca79bf87e0d4e24fed 100644 (file)
@@ -963,6 +963,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
                        free_irq(ourport->tx_irq, ourport);
                tx_enabled(port) = 0;
                ourport->tx_claimed = 0;
+               ourport->tx_mode = 0;
        }
 
        if (ourport->rx_claimed) {
index 175c9956cbe3a36949526029103d38b4c97225c3..6bdb5706904497ca9eccb7fd5d979c67824d8600 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/export.h>
 #include <linux/hid.h>
 #include <linux/module.h>
+#include <linux/uio.h>
 #include <asm/unaligned.h>
 
 #include <linux/usb/composite.h>
@@ -655,9 +656,10 @@ static void ffs_user_copy_worker(struct work_struct *work)
                unuse_mm(io_data->mm);
        }
 
-       aio_complete(io_data->kiocb, ret, ret);
+       io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
 
-       if (io_data->ffs->ffs_eventfd && !io_data->kiocb->ki_eventfd)
+       if (io_data->ffs->ffs_eventfd &&
+           !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
                eventfd_signal(io_data->ffs->ffs_eventfd, 1);
 
        usb_ep_free_request(io_data->ep, io_data->req);
@@ -1059,8 +1061,6 @@ static const struct file_operations ffs_epfile_operations = {
        .llseek =       no_llseek,
 
        .open =         ffs_epfile_open,
-       .write =        new_sync_write,
-       .read =         new_sync_read,
        .write_iter =   ffs_epfile_write_iter,
        .read_iter =    ffs_epfile_read_iter,
        .release =      ffs_epfile_release,
index 200f9a584064fd9199ba99ff75a2e26a33c788f7..6af58c6dba5ebe7ea134197537864b95c8bfe002 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/poll.h>
 #include <linux/mmu_context.h>
 #include <linux/aio.h>
+#include <linux/uio.h>
 
 #include <linux/device.h>
 #include <linux/moduleparam.h>
@@ -469,7 +470,7 @@ static void ep_user_copy_worker(struct work_struct *work)
                ret = -EFAULT;
 
        /* completing the iocb can drop the ctx and mm, don't touch mm after */
-       aio_complete(iocb, ret, ret);
+       iocb->ki_complete(iocb, ret, ret);
 
        kfree(priv->buf);
        kfree(priv->to_free);
@@ -497,7 +498,8 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
                kfree(priv);
                iocb->private = NULL;
                /* aio_complete() reports bytes-transferred _and_ faults */
-               aio_complete(iocb, req->actual ? req->actual : req->status,
+
+               iocb->ki_complete(iocb, req->actual ? req->actual : req->status,
                                req->status);
        } else {
                /* ep_copy_to_user() won't report both; we hide some faults */
@@ -697,8 +699,6 @@ static const struct file_operations ep_io_operations = {
        .open =         ep_open,
        .release =      ep_release,
        .llseek =       no_llseek,
-       .read =         new_sync_read,
-       .write =        new_sync_write,
        .unlocked_ioctl = ep_ioctl,
        .read_iter =    ep_read_iter,
        .write_iter =   ep_write_iter,
index a7865c4b04980898b49317386ad6138aab051bc5..0827d7c965276382418f0a602ec5c1412c023142 100644 (file)
@@ -387,6 +387,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
                status = PORT_PLC;
                port_change_bit = "link state";
                break;
+       case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
+               status = PORT_CEC;
+               port_change_bit = "config error";
+               break;
        default:
                /* Should never happen */
                return;
@@ -588,6 +592,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        status |= USB_PORT_STAT_C_LINK_STATE << 16;
                if ((raw_port_status & PORT_WRC))
                        status |= USB_PORT_STAT_C_BH_RESET << 16;
+               if ((raw_port_status & PORT_CEC))
+                       status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
        }
 
        if (hcd->speed != HCD_USB3) {
@@ -1005,6 +1011,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                case USB_PORT_FEAT_C_OVER_CURRENT:
                case USB_PORT_FEAT_C_ENABLE:
                case USB_PORT_FEAT_C_PORT_LINK_STATE:
+               case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
                        xhci_clear_port_change_bit(xhci, wValue, wIndex,
                                        port_array[wIndex], temp);
                        break;
@@ -1069,7 +1076,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
         */
        status = bus_state->resuming_ports;
 
-       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
+       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
 
        spin_lock_irqsave(&xhci->lock, flags);
        /* For each port, did anything change?  If so, set that bit in buf. */
index fd53c9ebd662a5fb4593c99ce5dd7c2552ad83c5..2af32e26fafc3727279fe656fbbcaf158736371d 100644 (file)
@@ -115,6 +115,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
                xhci->quirks |= XHCI_INTEL_HOST;
+               xhci->quirks |= XHCI_AVOID_BEI;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                        pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
@@ -130,7 +131,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                 * PPT chipsets.
                 */
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
-               xhci->quirks |= XHCI_AVOID_BEI;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
index f32c292cc8689d81bfce947f7c587603e95c09be..3fc4fe7702533b785bc22ead9fe17939e1f365f8 100644 (file)
@@ -1203,7 +1203,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
 
        if (udc->driver) {
                dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
-               spin_unlock(&udc->lock);
+               spin_unlock_irqrestore(&udc->lock, flags);
                return -EBUSY;
        }
 
index 3086dec0ef53bbd5d5d3d21087b91469983492fb..8eb68a31cab6c4021617ca555cd58b086872c112 100644 (file)
@@ -604,6 +604,7 @@ static const struct usb_device_id id_table_combined[] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
        /*
         * ELV devices:
         */
@@ -1883,8 +1884,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
 {
        struct usb_device *udev = serial->dev;
 
-       if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
-           (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
+       if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
+               return ftdi_jtag_probe(serial);
+
+       if (udev->product &&
+               (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+                !strcmp(udev->product, "SNAP Connect E10")))
                return ftdi_jtag_probe(serial);
 
        return 0;
index 56b1b55c4751696b2e89633ea90bfe1c2940c436..4e4f46f3c89c025670d42860756f39b2bb62ae24 100644 (file)
  */
 #define FTDI_NT_ORIONLXM_PID   0x7c90  /* OrionLXm Substation Automation Platform */
 
+/*
+ * Synapse Wireless product ids (FTDI_VID)
+ * http://www.synapse-wireless.com
+ */
+#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
+
 
 /********************************/
 /** third-party VID/PID combos **/
index dd97d8b572c336e03c7c4c282fbe7e38c44646a4..4f7e072e4e001e9f7c439114f13b52a77d3250d6 100644 (file)
@@ -61,6 +61,7 @@ struct keyspan_pda_private {
 /* For Xircom PGSDB9 and older Entrega version of the same device */
 #define XIRCOM_VENDOR_ID               0x085a
 #define XIRCOM_FAKE_ID                 0x8027
+#define XIRCOM_FAKE_ID_2               0x8025 /* "PGMFHUB" serial */
 #define ENTREGA_VENDOR_ID              0x1645
 #define ENTREGA_FAKE_ID                        0x8093
 
@@ -70,6 +71,7 @@ static const struct usb_device_id id_table_combined[] = {
 #endif
 #ifdef XIRCOM
        { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
+       { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
        { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
 #endif
        { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
@@ -93,6 +95,7 @@ static const struct usb_device_id id_table_fake[] = {
 #ifdef XIRCOM
 static const struct usb_device_id id_table_fake_xircom[] = {
        { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
+       { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
        { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
        { }
 };
index 2bbfc25e582cb8b334a1ef4083b22da56c48cc65..7d137a43cc86842ed98bc27e63d6dfcb2042f177 100644 (file)
@@ -357,13 +357,13 @@ static void handle_tx(struct vhost_net *net)
                iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
                iov_iter_advance(&msg.msg_iter, hdr_size);
                /* Sanity check */
-               if (!iov_iter_count(&msg.msg_iter)) {
+               if (!msg_data_left(&msg)) {
                        vq_err(vq, "Unexpected header len for TX: "
                               "%zd expected %zd\n",
                               len, hdr_size);
                        break;
                }
-               len = iov_iter_count(&msg.msg_iter);
+               len = msg_data_left(&msg);
 
                zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
                                   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
@@ -390,7 +390,7 @@ static void handle_tx(struct vhost_net *net)
                        ubufs = NULL;
                }
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
-               err = sock->ops->sendmsg(NULL, sock, &msg, len);
+               err = sock->ops->sendmsg(sock, &msg, len);
                if (unlikely(err < 0)) {
                        if (zcopy_used) {
                                vhost_net_ubuf_put(ubufs);
@@ -566,7 +566,7 @@ static void handle_rx(struct vhost_net *net)
                /* On overrun, truncate and discard */
                if (unlikely(headcount > UIO_MAXIOV)) {
                        iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
-                       err = sock->ops->recvmsg(NULL, sock, &msg,
+                       err = sock->ops->recvmsg(sock, &msg,
                                                 1, MSG_DONTWAIT | MSG_TRUNC);
                        pr_debug("Discarded rx packet: len %zd\n", sock_len);
                        continue;
@@ -592,7 +592,7 @@ static void handle_rx(struct vhost_net *net)
                         */
                        iov_iter_advance(&msg.msg_iter, vhost_hlen);
                }
-               err = sock->ops->recvmsg(NULL, sock, &msg,
+               err = sock->ops->recvmsg(sock, &msg,
                                         sock_len, MSG_DONTWAIT | MSG_TRUNC);
                /* Userspace might have consumed the packet meanwhile:
                 * it's not supposed to do this usually, but might be hard
index b83ebfbf3fdc7db1ab56b36cea1c89626b24e9cb..5a0db6dec8d1fd4ad2673a03188a910606ae42b3 100644 (file)
@@ -68,14 +68,10 @@ int v9fs_file_open(struct inode *inode, struct file *file);
 void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
 int v9fs_uflags2omode(int uflags, int extended);
 
-ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
-ssize_t v9fs_fid_readn(struct p9_fid *, char *, char __user *, u32, u64);
 void v9fs_blank_wstat(struct p9_wstat *wstat);
 int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *);
 int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
                         int datasync);
-ssize_t v9fs_file_write_internal(struct inode *, struct p9_fid *,
-                                const char __user *, size_t, loff_t *, int);
 int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode);
 int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode);
 static inline void v9fs_invalidate_inode_attr(struct inode *inode)
index eb14e055ea83e8509e7ea6ae569e3c1966d3b896..be35d05a4d0efc00c5955cc047ac20bea293149a 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/pagemap.h>
 #include <linux/idr.h>
 #include <linux/sched.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
 
  */
 static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
 {
-       int retval;
-       loff_t offset;
-       char *buffer;
-       struct inode *inode;
+       struct inode *inode = page->mapping->host;
+       struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
+       struct iov_iter to;
+       int retval, err;
 
-       inode = page->mapping->host;
        p9_debug(P9_DEBUG_VFS, "\n");
 
        BUG_ON(!PageLocked(page));
@@ -65,16 +64,16 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
        if (retval == 0)
                return retval;
 
-       buffer = kmap(page);
-       offset = page_offset(page);
+       iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE);
 
-       retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset);
-       if (retval < 0) {
+       retval = p9_client_read(fid, page_offset(page), &to, &err);
+       if (err) {
                v9fs_uncache_page(inode, page);
+               retval = err;
                goto done;
        }
 
-       memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval);
+       zero_user(page, retval, PAGE_SIZE - retval);
        flush_dcache_page(page);
        SetPageUptodate(page);
 
@@ -82,7 +81,6 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
        retval = 0;
 
 done:
-       kunmap(page);
        unlock_page(page);
        return retval;
 }
@@ -161,41 +159,32 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset,
 
 static int v9fs_vfs_writepage_locked(struct page *page)
 {
-       char *buffer;
-       int retval, len;
-       loff_t offset, size;
-       mm_segment_t old_fs;
-       struct v9fs_inode *v9inode;
        struct inode *inode = page->mapping->host;
+       struct v9fs_inode *v9inode = V9FS_I(inode);
+       loff_t size = i_size_read(inode);
+       struct iov_iter from;
+       struct bio_vec bvec;
+       int err, len;
 
-       v9inode = V9FS_I(inode);
-       size = i_size_read(inode);
        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;
        else
                len = PAGE_CACHE_SIZE;
 
-       set_page_writeback(page);
-
-       buffer = kmap(page);
-       offset = page_offset(page);
+       bvec.bv_page = page;
+       bvec.bv_offset = 0;
+       bvec.bv_len = len;
+       iov_iter_bvec(&from, ITER_BVEC | WRITE, &bvec, 1, len);
 
-       old_fs = get_fs();
-       set_fs(get_ds());
        /* We should have writeback_fid always set */
        BUG_ON(!v9inode->writeback_fid);
 
-       retval = v9fs_file_write_internal(inode,
-                                         v9inode->writeback_fid,
-                                         (__force const char __user *)buffer,
-                                         len, &offset, 0);
-       if (retval > 0)
-               retval = 0;
+       set_page_writeback(page);
+
+       p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err);
 
-       set_fs(old_fs);
-       kunmap(page);
        end_page_writeback(page);
-       return retval;
+       return err;
 }
 
 static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -241,7 +230,6 @@ static int v9fs_launder_page(struct page *page)
 
 /**
  * v9fs_direct_IO - 9P address space operation for direct I/O
- * @rw: direction (read or write)
  * @iocb: target I/O control block
  * @iov: array of vectors that define I/O buffer
  * @pos: offset in file to begin the operation
@@ -259,18 +247,23 @@ static int v9fs_launder_page(struct page *page)
  *
  */
 static ssize_t
-v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
+v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
-       /*
-        * FIXME
-        * Now that we do caching with cache mode enabled, We need
-        * to support direct IO
-        */
-       p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%pD) off/no(%lld/%lu) EINVAL\n",
-                iocb->ki_filp,
-                (long long)pos, iter->nr_segs);
-
-       return -EINVAL;
+       struct file *file = iocb->ki_filp;
+       ssize_t n;
+       int err = 0;
+       if (iov_iter_rw(iter) == WRITE) {
+               n = p9_client_write(file->private_data, pos, iter, &err);
+               if (n) {
+                       struct inode *inode = file_inode(file);
+                       loff_t i_size = i_size_read(inode);
+                       if (pos + n > i_size)
+                               inode_add_bytes(inode, pos + n - i_size);
+               }
+       } else {
+               n = p9_client_read(file->private_data, pos, iter, &err);
+       }
+       return n ? n : err;
 }
 
 static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
index 4f1151088ebe8779da1af2173818c0088afb2cd0..76c3b1ab6361d69b12f069ed7e9d68b48751eb82 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/inet.h>
 #include <linux/idr.h>
 #include <linux/slab.h>
+#include <linux/uio.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
 
@@ -115,6 +116,7 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
        int buflen;
        int reclen = 0;
        struct p9_rdir *rdir;
+       struct kvec kvec;
 
        p9_debug(P9_DEBUG_VFS, "name %pD\n", file);
        fid = file->private_data;
@@ -124,16 +126,21 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
        rdir = v9fs_alloc_rdir_buf(file, buflen);
        if (!rdir)
                return -ENOMEM;
+       kvec.iov_base = rdir->buf;
+       kvec.iov_len = buflen;
 
        while (1) {
                if (rdir->tail == rdir->head) {
-                       err = v9fs_file_readn(file, rdir->buf, NULL,
-                                                       buflen, ctx->pos);
-                       if (err <= 0)
+                       struct iov_iter to;
+                       int n;
+                       iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buflen);
+                       n = p9_client_read(file->private_data, ctx->pos, &to,
+                                          &err);
+                       if (err)
                                return err;
 
                        rdir->head = 0;
-                       rdir->tail = err;
+                       rdir->tail = n;
                }
                while (rdir->head < rdir->tail) {
                        p9stat_init(&st);
index b40133796b8734d32cf5376b12d1e01672af85db..b5b020ace1b3cddefc9490b2157d3c205fca83c2 100644 (file)
@@ -36,6 +36,8 @@
 #include <linux/utsname.h>
 #include <asm/uaccess.h>
 #include <linux/idr.h>
+#include <linux/uio.h>
+#include <linux/slab.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
 
@@ -285,6 +287,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
                        fl->fl_end = glock.start + glock.length - 1;
                fl->fl_pid = glock.proc_id;
        }
+       kfree(glock.client_id);
        return res;
 }
 
@@ -363,63 +366,6 @@ out_err:
        return ret;
 }
 
-/**
- * v9fs_fid_readn - read from a fid
- * @fid: fid to read
- * @data: data buffer to read data into
- * @udata: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
- *
- */
-ssize_t
-v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
-              u64 offset)
-{
-       int n, total, size;
-
-       p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n",
-                fid->fid, (long long unsigned)offset, count);
-       n = 0;
-       total = 0;
-       size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
-       do {
-               n = p9_client_read(fid, data, udata, offset, count);
-               if (n <= 0)
-                       break;
-
-               if (data)
-                       data += n;
-               if (udata)
-                       udata += n;
-
-               offset += n;
-               count -= n;
-               total += n;
-       } while (count > 0 && n == size);
-
-       if (n < 0)
-               total = n;
-
-       return total;
-}
-
-/**
- * v9fs_file_readn - read from a file
- * @filp: file pointer to read
- * @data: data buffer to read data into
- * @udata: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
- *
- */
-ssize_t
-v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
-              u64 offset)
-{
-       return v9fs_fid_readn(filp->private_data, data, udata, count, offset);
-}
-
 /**
  * v9fs_file_read - read from a file
  * @filp: file pointer to read
@@ -430,69 +376,22 @@ v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
  */
 
 static ssize_t
-v9fs_file_read(struct file *filp, char __user *udata, size_t count,
-              loff_t * offset)
+v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-       int ret;
-       struct p9_fid *fid;
-       size_t size;
+       struct p9_fid *fid = iocb->ki_filp->private_data;
+       int ret, err;
 
-       p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
-       fid = filp->private_data;
+       p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
+                iov_iter_count(to), iocb->ki_pos);
 
-       size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
-       if (count > size)
-               ret = v9fs_file_readn(filp, NULL, udata, count, *offset);
-       else
-               ret = p9_client_read(fid, NULL, udata, *offset, count);
-
-       if (ret > 0)
-               *offset += ret;
+       ret = p9_client_read(fid, iocb->ki_pos, to, &err);
+       if (!ret)
+               return err;
 
+       iocb->ki_pos += ret;
        return ret;
 }
 
-ssize_t
-v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
-                        const char __user *data, size_t count,
-                        loff_t *offset, int invalidate)
-{
-       int n;
-       loff_t i_size;
-       size_t total = 0;
-       loff_t origin = *offset;
-       unsigned long pg_start, pg_end;
-
-       p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
-                data, (int)count, (int)*offset);
-
-       do {
-               n = p9_client_write(fid, NULL, data+total, origin+total, count);
-               if (n <= 0)
-                       break;
-               count -= n;
-               total += n;
-       } while (count > 0);
-
-       if (invalidate && (total > 0)) {
-               pg_start = origin >> PAGE_CACHE_SHIFT;
-               pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
-               if (inode->i_mapping && inode->i_mapping->nrpages)
-                       invalidate_inode_pages2_range(inode->i_mapping,
-                                                     pg_start, pg_end);
-               *offset += total;
-               i_size = i_size_read(inode);
-               if (*offset > i_size) {
-                       inode_add_bytes(inode, *offset - i_size);
-                       i_size_write(inode, *offset);
-               }
-       }
-       if (n < 0)
-               return n;
-
-       return total;
-}
-
 /**
  * v9fs_file_write - write to a file
  * @filp: file pointer to write
@@ -502,35 +401,45 @@ v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
  *
  */
 static ssize_t
-v9fs_file_write(struct file *filp, const char __user * data,
-               size_t count, loff_t *offset)
+v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct file *file = iocb->ki_filp;
        ssize_t retval = 0;
-       loff_t origin = *offset;
+       loff_t origin = iocb->ki_pos;
+       size_t count = iov_iter_count(from);
+       int err = 0;
 
-
-       retval = generic_write_checks(filp, &origin, &count, 0);
+       retval = generic_write_checks(file, &origin, &count);
        if (retval)
-               goto out;
+               return retval;
+
+       iov_iter_truncate(from, count);
 
-       retval = -EINVAL;
-       if ((ssize_t) count < 0)
-               goto out;
-       retval = 0;
        if (!count)
-               goto out;
+               return 0;
 
-       retval = v9fs_file_write_internal(file_inode(filp),
-                                       filp->private_data,
-                                       data, count, &origin, 1);
-       /* update offset on successful write */
-       if (retval > 0)
-               *offset = origin;
-out:
-       return retval;
+       retval = p9_client_write(file->private_data, origin, from, &err);
+       if (retval > 0) {
+               struct inode *inode = file_inode(file);
+               loff_t i_size;
+               unsigned long pg_start, pg_end;
+               pg_start = origin >> PAGE_CACHE_SHIFT;
+               pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT;
+               if (inode->i_mapping && inode->i_mapping->nrpages)
+                       invalidate_inode_pages2_range(inode->i_mapping,
+                                                     pg_start, pg_end);
+               origin += retval;
+               i_size = i_size_read(inode);
+               iocb->ki_pos = origin;
+               if (origin > i_size) {
+                       inode_add_bytes(inode, origin - i_size);
+                       i_size_write(inode, origin);
+               }
+               return retval;
+       }
+       return err;
 }
 
-
 static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
                           int datasync)
 {
@@ -657,44 +566,6 @@ out_unlock:
        return VM_FAULT_NOPAGE;
 }
 
-static ssize_t
-v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
-                loff_t *offsetp)
-{
-       loff_t size, offset;
-       struct inode *inode;
-       struct address_space *mapping;
-
-       offset = *offsetp;
-       mapping = filp->f_mapping;
-       inode = mapping->host;
-       if (!count)
-               return 0;
-       size = i_size_read(inode);
-       if (offset < size)
-               filemap_write_and_wait_range(mapping, offset,
-                                            offset + count - 1);
-
-       return v9fs_file_read(filp, udata, count, offsetp);
-}
-
-/**
- * v9fs_cached_file_read - read from a file
- * @filp: file pointer to read
- * @data: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
- *
- */
-static ssize_t
-v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
-                     loff_t *offset)
-{
-       if (filp->f_flags & O_DIRECT)
-               return v9fs_direct_read(filp, data, count, offset);
-       return new_sync_read(filp, data, count, offset);
-}
-
 /**
  * v9fs_mmap_file_read - read from a file
  * @filp: file pointer to read
@@ -704,84 +575,12 @@ v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
  *
  */
 static ssize_t
-v9fs_mmap_file_read(struct file *filp, char __user *data, size_t count,
-                     loff_t *offset)
+v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
        /* TODO: Check if there are dirty pages */
-       return v9fs_file_read(filp, data, count, offset);
+       return v9fs_file_read_iter(iocb, to);
 }
 
-static ssize_t
-v9fs_direct_write(struct file *filp, const char __user * data,
-                 size_t count, loff_t *offsetp)
-{
-       loff_t offset;
-       ssize_t retval;
-       struct inode *inode;
-       struct address_space *mapping;
-
-       offset = *offsetp;
-       mapping = filp->f_mapping;
-       inode = mapping->host;
-       if (!count)
-               return 0;
-
-       mutex_lock(&inode->i_mutex);
-       retval = filemap_write_and_wait_range(mapping, offset,
-                                             offset + count - 1);
-       if (retval)
-               goto err_out;
-       /*
-        * After a write we want buffered reads to be sure to go to disk to get
-        * the new data.  We invalidate clean cached page from the region we're
-        * about to write.  We do this *before* the write so that if we fail
-        * here we fall back to buffered write
-        */
-       if (mapping->nrpages) {
-               pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
-               pgoff_t pg_end   = (offset + count - 1) >> PAGE_CACHE_SHIFT;
-
-               retval = invalidate_inode_pages2_range(mapping,
-                                                       pg_start, pg_end);
-               /*
-                * If a page can not be invalidated, fall back
-                * to buffered write.
-                */
-               if (retval) {
-                       if (retval == -EBUSY)
-                               goto buff_write;
-                       goto err_out;
-               }
-       }
-       retval = v9fs_file_write(filp, data, count, offsetp);
-err_out:
-       mutex_unlock(&inode->i_mutex);
-       return retval;
-
-buff_write:
-       mutex_unlock(&inode->i_mutex);
-       return new_sync_write(filp, data, count, offsetp);
-}
-
-/**
- * v9fs_cached_file_write - write to a file
- * @filp: file pointer to write
- * @data: data buffer to write data from
- * @count: size of buffer
- * @offset: offset at which to write data
- *
- */
-static ssize_t
-v9fs_cached_file_write(struct file *filp, const char __user * data,
-                      size_t count, loff_t *offset)
-{
-
-       if (filp->f_flags & O_DIRECT)
-               return v9fs_direct_write(filp, data, count, offset);
-       return new_sync_write(filp, data, count, offset);
-}
-
-
 /**
  * v9fs_mmap_file_write - write to a file
  * @filp: file pointer to write
@@ -791,14 +590,13 @@ v9fs_cached_file_write(struct file *filp, const char __user * data,
  *
  */
 static ssize_t
-v9fs_mmap_file_write(struct file *filp, const char __user *data,
-                      size_t count, loff_t *offset)
+v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        /*
         * TODO: invalidate mmaps on filp's inode between
         * offset and offset+count
         */
-       return v9fs_file_write(filp, data, count, offset);
+       return v9fs_file_write_iter(iocb, from);
 }
 
 static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
@@ -843,8 +641,6 @@ static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
 
 const struct file_operations v9fs_cached_file_operations = {
        .llseek = generic_file_llseek,
-       .read = v9fs_cached_file_read,
-       .write = v9fs_cached_file_write,
        .read_iter = generic_file_read_iter,
        .write_iter = generic_file_write_iter,
        .open = v9fs_file_open,
@@ -856,8 +652,6 @@ const struct file_operations v9fs_cached_file_operations = {
 
 const struct file_operations v9fs_cached_file_operations_dotl = {
        .llseek = generic_file_llseek,
-       .read = v9fs_cached_file_read,
-       .write = v9fs_cached_file_write,
        .read_iter = generic_file_read_iter,
        .write_iter = generic_file_write_iter,
        .open = v9fs_file_open,
@@ -870,8 +664,8 @@ const struct file_operations v9fs_cached_file_operations_dotl = {
 
 const struct file_operations v9fs_file_operations = {
        .llseek = generic_file_llseek,
-       .read = v9fs_file_read,
-       .write = v9fs_file_write,
+       .read_iter = v9fs_file_read_iter,
+       .write_iter = v9fs_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
@@ -881,8 +675,8 @@ const struct file_operations v9fs_file_operations = {
 
 const struct file_operations v9fs_file_operations_dotl = {
        .llseek = generic_file_llseek,
-       .read = v9fs_file_read,
-       .write = v9fs_file_write,
+       .read_iter = v9fs_file_read_iter,
+       .write_iter = v9fs_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock_dotl,
@@ -893,8 +687,8 @@ const struct file_operations v9fs_file_operations_dotl = {
 
 const struct file_operations v9fs_mmap_file_operations = {
        .llseek = generic_file_llseek,
-       .read = v9fs_mmap_file_read,
-       .write = v9fs_mmap_file_write,
+       .read_iter = v9fs_mmap_file_read_iter,
+       .write_iter = v9fs_mmap_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
@@ -904,8 +698,8 @@ const struct file_operations v9fs_mmap_file_operations = {
 
 const struct file_operations v9fs_mmap_file_operations_dotl = {
        .llseek = generic_file_llseek,
-       .read = v9fs_mmap_file_read,
-       .write = v9fs_mmap_file_write,
+       .read_iter = v9fs_mmap_file_read_iter,
+       .write_iter = v9fs_mmap_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock_dotl,
index f95e01e058e4de657b3cd85aafc233f0565c6e16..0cf44b6cccd6ee7dd131033845c7619ad778ab89 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/sched.h>
+#include <linux/uio.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
 
@@ -25,50 +26,34 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
                           void *buffer, size_t buffer_size)
 {
        ssize_t retval;
-       int msize, read_count;
-       u64 offset = 0, attr_size;
+       u64 attr_size;
        struct p9_fid *attr_fid;
+       struct kvec kvec = {.iov_base = buffer, .iov_len = buffer_size};
+       struct iov_iter to;
+       int err;
+
+       iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buffer_size);
 
        attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
        if (IS_ERR(attr_fid)) {
                retval = PTR_ERR(attr_fid);
                p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n",
                         retval);
-               attr_fid = NULL;
-               goto error;
-       }
-       if (!buffer_size) {
-               /* request to get the attr_size */
-               retval = attr_size;
-               goto error;
+               return retval;
        }
        if (attr_size > buffer_size) {
-               retval = -ERANGE;
-               goto error;
-       }
-       msize = attr_fid->clnt->msize;
-       while (attr_size) {
-               if (attr_size > (msize - P9_IOHDRSZ))
-                       read_count = msize - P9_IOHDRSZ;
+               if (!buffer_size) /* request to get the attr_size */
+                       retval = attr_size;
                else
-                       read_count = attr_size;
-               read_count = p9_client_read(attr_fid, ((char *)buffer)+offset,
-                                       NULL, offset, read_count);
-               if (read_count < 0) {
-                       /* error in xattr read */
-                       retval = read_count;
-                       goto error;
-               }
-               offset += read_count;
-               attr_size -= read_count;
+                       retval = -ERANGE;
+       } else {
+               iov_iter_truncate(&to, attr_size);
+               retval = p9_client_read(attr_fid, 0, &to, &err);
+               if (err)
+                       retval = err;
        }
-       /* Total read xattr bytes */
-       retval = offset;
-error:
-       if (attr_fid)
-               p9_client_clunk(attr_fid);
+       p9_client_clunk(attr_fid);
        return retval;
-
 }
 
 
@@ -120,8 +105,11 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
 int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
                   const void *value, size_t value_len, int flags)
 {
-       u64 offset = 0;
-       int retval, msize, write_count;
+       struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
+       struct iov_iter from;
+       int retval;
+
+       iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
 
        p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
                 name, value_len, flags);
@@ -135,29 +123,11 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
         * On success fid points to xattr
         */
        retval = p9_client_xattrcreate(fid, name, value_len, flags);
-       if (retval < 0) {
+       if (retval < 0)
                p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
                         retval);
-               goto err;
-       }
-       msize = fid->clnt->msize;
-       while (value_len) {
-               if (value_len > (msize - P9_IOHDRSZ))
-                       write_count = msize - P9_IOHDRSZ;
-               else
-                       write_count = value_len;
-               write_count = p9_client_write(fid, ((char *)value)+offset,
-                                       NULL, offset, write_count);
-               if (write_count < 0) {
-                       /* error in xattr write */
-                       retval = write_count;
-                       goto err;
-               }
-               offset += write_count;
-               value_len -= write_count;
-       }
-       retval = 0;
-err:
+       else
+               p9_client_write(fid, 0, &from, &retval);
        p9_client_clunk(fid);
        return retval;
 }
index 07c9edce5aa768ddeb7ae203c8c9cab489ea8417..46c0d5671cd541d8bafdc9327f2fbf99d0f5133a 100644 (file)
 
 const struct file_operations adfs_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
-       .write          = new_sync_write,
        .write_iter     = generic_file_write_iter,
        .splice_read    = generic_file_splice_read,
 };
index a91795e01a7ff0c0e85abf1bdf69f3d1d828b231..dcf27951781cfadc90711c0aa41f83401b2eb03b 100644 (file)
@@ -12,7 +12,7 @@
  *  affs regular file handling primitives
  */
 
-#include <linux/aio.h>
+#include <linux/uio.h>
 #include "affs.h"
 
 static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
@@ -389,8 +389,7 @@ static void affs_write_failed(struct address_space *mapping, loff_t to)
 }
 
 static ssize_t
-affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
-              loff_t offset)
+affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -398,15 +397,15 @@ affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       if (rw == WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                loff_t size = offset + count;
 
                if (AFFS_I(inode)->mmu_private < size)
                        return 0;
        }
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, affs_get_block);
-       if (ret < 0 && (rw & WRITE))
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, affs_get_block);
+       if (ret < 0 && iov_iter_rw(iter) == WRITE)
                affs_write_failed(mapping, offset + count);
        return ret;
 }
@@ -969,9 +968,7 @@ int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 }
 const struct file_operations affs_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = affs_file_open,
index 932ce07948b387d7aa75dffb3a9de1e21f1f50b8..999bc3caec9276b8148448d4ec2eadb0badbf3f0 100644 (file)
@@ -31,8 +31,6 @@ const struct file_operations afs_file_operations = {
        .open           = afs_open,
        .release        = afs_release,
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = afs_file_write,
        .mmap           = generic_file_readonly_mmap,
index 0dd4dafee10b391f7d92b89f8c4e491ed5f09810..91ea1aa0d8b3ab0a817b525e9f9b3deec98f775f 100644 (file)
 int afs_abort_to_error(u32 abort_code)
 {
        switch (abort_code) {
+       /* low errno codes inserted into abort namespace */
        case 13:                return -EACCES;
        case 27:                return -EFBIG;
        case 30:                return -EROFS;
+
+       /* VICE "special error" codes; 101 - 111 */
        case VSALVAGE:          return -EIO;
        case VNOVNODE:          return -ENOENT;
        case VNOVOL:            return -ENOMEDIUM;
@@ -36,11 +39,18 @@ int afs_abort_to_error(u32 abort_code)
        case VOVERQUOTA:        return -EDQUOT;
        case VBUSY:             return -EBUSY;
        case VMOVED:            return -ENXIO;
-       case 0x2f6df0a:         return -EWOULDBLOCK;
+
+       /* Unified AFS error table; ET "uae" == 0x2f6df00 */
+       case 0x2f6df00:         return -EPERM;
+       case 0x2f6df01:         return -ENOENT;
+       case 0x2f6df04:         return -EIO;
+       case 0x2f6df0a:         return -EAGAIN;
+       case 0x2f6df0b:         return -ENOMEM;
        case 0x2f6df0c:         return -EACCES;
        case 0x2f6df0f:         return -EBUSY;
        case 0x2f6df10:         return -EEXIST;
        case 0x2f6df11:         return -EXDEV;
+       case 0x2f6df12:         return -ENODEV;
        case 0x2f6df13:         return -ENOTDIR;
        case 0x2f6df14:         return -EISDIR;
        case 0x2f6df15:         return -EINVAL;
@@ -54,8 +64,12 @@ int afs_abort_to_error(u32 abort_code)
        case 0x2f6df23:         return -ENAMETOOLONG;
        case 0x2f6df24:         return -ENOLCK;
        case 0x2f6df26:         return -ENOTEMPTY;
+       case 0x2f6df28:         return -EWOULDBLOCK;
+       case 0x2f6df69:         return -ENOTCONN;
+       case 0x2f6df6c:         return -ETIMEDOUT;
        case 0x2f6df78:         return -EDQUOT;
 
+       /* RXKAD abort codes; from include/rxrpc/packet.h.  ET "RXK" == 0x1260B00 */
        case RXKADINCONSISTENCY: return -EPROTO;
        case RXKADPACKETSHORT:  return -EPROTO;
        case RXKADLEVELFAIL:    return -EKEYREJECTED;
index dbc732e9a5c01eb18ab91af910a997881dfe5fd8..3a57a1b0fb510b8c8167835ca62eb06d0c4b53ca 100644 (file)
@@ -770,15 +770,12 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
 void afs_send_empty_reply(struct afs_call *call)
 {
        struct msghdr msg;
-       struct kvec iov[1];
 
        _enter("");
 
-       iov[0].iov_base         = NULL;
-       iov[0].iov_len          = 0;
        msg.msg_name            = NULL;
        msg.msg_namelen         = 0;
-       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 0, 0);     /* WTF? */
+       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
        msg.msg_flags           = 0;
index c13cb08964eda91afe26754733054147e220ecb7..0714abcd7f32321754287e46aec129196832e2ef 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
 #include <linux/pagevec.h>
-#include <linux/aio.h>
 #include "internal.h"
 
 static int afs_write_back_from_locked_page(struct afs_writeback *wb,
index a793f7023755dc15cb2b8bebe5206bc610bb428c..5785c4b58fea5ffdeae17d43b6b00df2572dc638 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -151,6 +151,38 @@ struct kioctx {
        unsigned                id;
 };
 
+/*
+ * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
+ * cancelled or completed (this makes a certain amount of sense because
+ * successful cancellation - io_cancel() - does deliver the completion to
+ * userspace).
+ *
+ * And since most things don't implement kiocb cancellation and we'd really like
+ * kiocb completion to be lockless when possible, we use ki_cancel to
+ * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
+ * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
+ */
+#define KIOCB_CANCELLED                ((void *) (~0ULL))
+
+struct aio_kiocb {
+       struct kiocb            common;
+
+       struct kioctx           *ki_ctx;
+       kiocb_cancel_fn         *ki_cancel;
+
+       struct iocb __user      *ki_user_iocb;  /* user's aiocb */
+       __u64                   ki_user_data;   /* user's data for completion */
+
+       struct list_head        ki_list;        /* the aio core uses this
+                                                * for cancellation */
+
+       /*
+        * If the aio_resfd field of the userspace iocb is not zero,
+        * this is the underlying eventfd context to deliver events to.
+        */
+       struct eventfd_ctx      *ki_eventfd;
+};
+
 /*------ sysctl variables----*/
 static DEFINE_SPINLOCK(aio_nr_lock);
 unsigned long aio_nr;          /* current system wide number of aio requests */
@@ -220,7 +252,7 @@ static int __init aio_setup(void)
        if (IS_ERR(aio_mnt))
                panic("Failed to create aio fs mount.");
 
-       kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+       kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
        kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
        pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));
@@ -484,8 +516,9 @@ static int aio_setup_ring(struct kioctx *ctx)
 #define AIO_EVENTS_FIRST_PAGE  ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 #define AIO_EVENTS_OFFSET      (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 
-void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
+void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
 {
+       struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, common);
        struct kioctx *ctx = req->ki_ctx;
        unsigned long flags;
 
@@ -500,7 +533,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
 }
 EXPORT_SYMBOL(kiocb_set_cancel_fn);
 
-static int kiocb_cancel(struct kiocb *kiocb)
+static int kiocb_cancel(struct aio_kiocb *kiocb)
 {
        kiocb_cancel_fn *old, *cancel;
 
@@ -518,7 +551,7 @@ static int kiocb_cancel(struct kiocb *kiocb)
                cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
        } while (cancel != old);
 
-       return cancel(kiocb);
+       return cancel(&kiocb->common);
 }
 
 static void free_ioctx(struct work_struct *work)
@@ -554,13 +587,13 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 static void free_ioctx_users(struct percpu_ref *ref)
 {
        struct kioctx *ctx = container_of(ref, struct kioctx, users);
-       struct kiocb *req;
+       struct aio_kiocb *req;
 
        spin_lock_irq(&ctx->ctx_lock);
 
        while (!list_empty(&ctx->active_reqs)) {
                req = list_first_entry(&ctx->active_reqs,
-                                      struct kiocb, ki_list);
+                                      struct aio_kiocb, ki_list);
 
                list_del_init(&req->ki_list);
                kiocb_cancel(req);
@@ -659,8 +692,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        nr_events *= 2;
 
        /* Prevent overflows */
-       if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
-           (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
+       if (nr_events > (0x10000000U / sizeof(struct io_event))) {
                pr_debug("ENOMEM: nr_events too high\n");
                return ERR_PTR(-EINVAL);
        }
@@ -786,22 +818,6 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
        return 0;
 }
 
-/* wait_on_sync_kiocb:
- *     Waits on the given sync kiocb to complete.
- */
-ssize_t wait_on_sync_kiocb(struct kiocb *req)
-{
-       while (!req->ki_ctx) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               if (req->ki_ctx)
-                       break;
-               io_schedule();
-       }
-       __set_current_state(TASK_RUNNING);
-       return req->ki_user_data;
-}
-EXPORT_SYMBOL(wait_on_sync_kiocb);
-
 /*
  * exit_aio: called when the last user of mm goes away.  At this point, there is
  * no way for any new requests to be submited or any of the io_* syscalls to be
@@ -956,9 +972,9 @@ static void user_refill_reqs_available(struct kioctx *ctx)
  *     Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
  */
-static inline struct kiocb *aio_get_req(struct kioctx *ctx)
+static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
-       struct kiocb *req;
+       struct aio_kiocb *req;
 
        if (!get_reqs_available(ctx)) {
                user_refill_reqs_available(ctx);
@@ -979,10 +995,10 @@ out_put:
        return NULL;
 }
 
-static void kiocb_free(struct kiocb *req)
+static void kiocb_free(struct aio_kiocb *req)
 {
-       if (req->ki_filp)
-               fput(req->ki_filp);
+       if (req->common.ki_filp)
+               fput(req->common.ki_filp);
        if (req->ki_eventfd != NULL)
                eventfd_ctx_put(req->ki_eventfd);
        kmem_cache_free(kiocb_cachep, req);
@@ -1018,8 +1034,9 @@ out:
 /* aio_complete
  *     Called when the io request on the given iocb is complete.
  */
-void aio_complete(struct kiocb *iocb, long res, long res2)
+static void aio_complete(struct kiocb *kiocb, long res, long res2)
 {
+       struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, common);
        struct kioctx   *ctx = iocb->ki_ctx;
        struct aio_ring *ring;
        struct io_event *ev_page, *event;
@@ -1033,13 +1050,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
         *    ref, no other paths have a way to get another ref
         *  - the sync task helpfully left a reference to itself in the iocb
         */
-       if (is_sync_kiocb(iocb)) {
-               iocb->ki_user_data = res;
-               smp_wmb();
-               iocb->ki_ctx = ERR_PTR(-EXDEV);
-               wake_up_process(iocb->ki_obj.tsk);
-               return;
-       }
+       BUG_ON(is_sync_kiocb(kiocb));
 
        if (iocb->ki_list.next) {
                unsigned long flags;
@@ -1065,7 +1076,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
        ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
        event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-       event->obj = (u64)(unsigned long)iocb->ki_obj.user;
+       event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
        event->data = iocb->ki_user_data;
        event->res = res;
        event->res2 = res2;
@@ -1074,7 +1085,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
        flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
        pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-                ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
+                ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
                 res, res2);
 
        /* after flagging the request as done, we
@@ -1121,7 +1132,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
        percpu_ref_put(&ctx->reqs);
 }
-EXPORT_SYMBOL(aio_complete);
 
 /* aio_read_events_ring
  *     Pull an event off of the ioctx's event ring.  Returns the number of
@@ -1345,50 +1355,21 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
        return -EINVAL;
 }
 
-typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
-                           unsigned long, loff_t);
 typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *);
 
-static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb,
-                                    int rw, char __user *buf,
-                                    unsigned long *nr_segs,
-                                    struct iovec **iovec,
-                                    bool compat)
+static int aio_setup_vectored_rw(int rw, char __user *buf, size_t len,
+                                struct iovec **iovec,
+                                bool compat,
+                                struct iov_iter *iter)
 {
-       ssize_t ret;
-
-       *nr_segs = kiocb->ki_nbytes;
-
 #ifdef CONFIG_COMPAT
        if (compat)
-               ret = compat_rw_copy_check_uvector(rw,
+               return compat_import_iovec(rw,
                                (struct compat_iovec __user *)buf,
-                               *nr_segs, UIO_FASTIOV, *iovec, iovec);
-       else
+                               len, UIO_FASTIOV, iovec, iter);
 #endif
-               ret = rw_copy_check_uvector(rw,
-                               (struct iovec __user *)buf,
-                               *nr_segs, UIO_FASTIOV, *iovec, iovec);
-       if (ret < 0)
-               return ret;
-
-       /* ki_nbytes now reflect bytes instead of segs */
-       kiocb->ki_nbytes = ret;
-       return 0;
-}
-
-static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
-                                      int rw, char __user *buf,
-                                      unsigned long *nr_segs,
-                                      struct iovec *iovec)
-{
-       if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes)))
-               return -EFAULT;
-
-       iovec->iov_base = buf;
-       iovec->iov_len = kiocb->ki_nbytes;
-       *nr_segs = 1;
-       return 0;
+       return import_iovec(rw, (struct iovec __user *)buf,
+                               len, UIO_FASTIOV, iovec, iter);
 }
 
 /*
@@ -1396,14 +1377,12 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
  *     Performs the initial checks and io submission.
  */
 static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
-                           char __user *buf, bool compat)
+                           char __user *buf, size_t len, bool compat)
 {
        struct file *file = req->ki_filp;
        ssize_t ret;
-       unsigned long nr_segs;
        int rw;
        fmode_t mode;
-       aio_rw_op *rw_op;
        rw_iter_op *iter_op;
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct iov_iter iter;
@@ -1413,7 +1392,6 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
        case IOCB_CMD_PREADV:
                mode    = FMODE_READ;
                rw      = READ;
-               rw_op   = file->f_op->aio_read;
                iter_op = file->f_op->read_iter;
                goto rw_common;
 
@@ -1421,51 +1399,40 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
        case IOCB_CMD_PWRITEV:
                mode    = FMODE_WRITE;
                rw      = WRITE;
-               rw_op   = file->f_op->aio_write;
                iter_op = file->f_op->write_iter;
                goto rw_common;
 rw_common:
                if (unlikely(!(file->f_mode & mode)))
                        return -EBADF;
 
-               if (!rw_op && !iter_op)
+               if (!iter_op)
                        return -EINVAL;
 
-               ret = (opcode == IOCB_CMD_PREADV ||
-                      opcode == IOCB_CMD_PWRITEV)
-                       ? aio_setup_vectored_rw(req, rw, buf, &nr_segs,
-                                               &iovec, compat)
-                       : aio_setup_single_vector(req, rw, buf, &nr_segs,
-                                                 iovec);
+               if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV)
+                       ret = aio_setup_vectored_rw(rw, buf, len,
+                                               &iovec, compat, &iter);
+               else {
+                       ret = import_single_range(rw, buf, len, iovec, &iter);
+                       iovec = NULL;
+               }
                if (!ret)
-                       ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+                       ret = rw_verify_area(rw, file, &req->ki_pos,
+                                            iov_iter_count(&iter));
                if (ret < 0) {
-                       if (iovec != inline_vecs)
-                               kfree(iovec);
+                       kfree(iovec);
                        return ret;
                }
 
-               req->ki_nbytes = ret;
-
-               /* XXX: move/kill - rw_verify_area()? */
-               /* This matches the pread()/pwrite() logic */
-               if (req->ki_pos < 0) {
-                       ret = -EINVAL;
-                       break;
-               }
+               len = ret;
 
                if (rw == WRITE)
                        file_start_write(file);
 
-               if (iter_op) {
-                       iov_iter_init(&iter, rw, iovec, nr_segs, req->ki_nbytes);
-                       ret = iter_op(req, &iter);
-               } else {
-                       ret = rw_op(req, iovec, nr_segs, req->ki_pos);
-               }
+               ret = iter_op(req, &iter);
 
                if (rw == WRITE)
                        file_end_write(file);
+               kfree(iovec);
                break;
 
        case IOCB_CMD_FDSYNC:
@@ -1487,9 +1454,6 @@ rw_common:
                return -EINVAL;
        }
 
-       if (iovec != inline_vecs)
-               kfree(iovec);
-
        if (ret != -EIOCBQUEUED) {
                /*
                 * There's no easy way to restart the syscall since other AIO's
@@ -1508,7 +1472,7 @@ rw_common:
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         struct iocb *iocb, bool compat)
 {
-       struct kiocb *req;
+       struct aio_kiocb *req;
        ssize_t ret;
 
        /* enforce forwards compatibility on users */
@@ -1531,11 +1495,14 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        if (unlikely(!req))
                return -EAGAIN;
 
-       req->ki_filp = fget(iocb->aio_fildes);
-       if (unlikely(!req->ki_filp)) {
+       req->common.ki_filp = fget(iocb->aio_fildes);
+       if (unlikely(!req->common.ki_filp)) {
                ret = -EBADF;
                goto out_put_req;
        }
+       req->common.ki_pos = iocb->aio_offset;
+       req->common.ki_complete = aio_complete;
+       req->common.ki_flags = 0;
 
        if (iocb->aio_flags & IOCB_FLAG_RESFD) {
                /*
@@ -1550,6 +1517,8 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                        req->ki_eventfd = NULL;
                        goto out_put_req;
                }
+
+               req->common.ki_flags |= IOCB_EVENTFD;
        }
 
        ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
@@ -1558,13 +1527,12 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                goto out_put_req;
        }
 
-       req->ki_obj.user = user_iocb;
+       req->ki_user_iocb = user_iocb;
        req->ki_user_data = iocb->aio_data;
-       req->ki_pos = iocb->aio_offset;
-       req->ki_nbytes = iocb->aio_nbytes;
 
-       ret = aio_run_iocb(req, iocb->aio_lio_opcode,
+       ret = aio_run_iocb(&req->common, iocb->aio_lio_opcode,
                           (char __user *)(unsigned long)iocb->aio_buf,
+                          iocb->aio_nbytes,
                           compat);
        if (ret)
                goto out_put_req;
@@ -1651,10 +1619,10 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
 /* lookup_kiocb
  *     Finds a given iocb for cancellation.
  */
-static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
-                                 u32 key)
+static struct aio_kiocb *
+lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key)
 {
-       struct list_head *pos;
+       struct aio_kiocb *kiocb;
 
        assert_spin_locked(&ctx->ctx_lock);
 
@@ -1662,9 +1630,8 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
                return NULL;
 
        /* TODO: use a hash or array, this sucks. */
-       list_for_each(pos, &ctx->active_reqs) {
-               struct kiocb *kiocb = list_kiocb(pos);
-               if (kiocb->ki_obj.user == iocb)
+       list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+               if (kiocb->ki_user_iocb == iocb)
                        return kiocb;
        }
        return NULL;
@@ -1684,7 +1651,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
                struct io_event __user *, result)
 {
        struct kioctx *ctx;
-       struct kiocb *kiocb;
+       struct aio_kiocb *kiocb;
        u32 key;
        int ret;
 
index 8e98cf954babb8944a32fdc58fd3c38760933a5a..d10e619632ab48b30390062455eef4b543b7c183 100644 (file)
@@ -213,7 +213,7 @@ void autofs4_clean_ino(struct autofs_info *);
 
 static inline int autofs_prepare_pipe(struct file *pipe)
 {
-       if (!pipe->f_op->write)
+       if (!(pipe->f_mode & FMODE_CAN_WRITE))
                return -EINVAL;
        if (!S_ISFIFO(file_inode(pipe)->i_mode))
                return -EINVAL;
index 116fd38ee472c74f243c05428969c2bd1bbe85e2..2ad05ab93db86efe0e4d0432b7ed8f1f37a0dd52 100644 (file)
@@ -70,7 +70,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
 
        mutex_lock(&sbi->pipe_mutex);
        while (bytes &&
-              (wr = file->f_op->write(file,data,bytes,&file->f_pos)) > 0) {
+              (wr = __vfs_write(file,data,bytes,&file->f_pos)) > 0) {
                data += wr;
                bytes -= wr;
        }
index e7f88ace1a2508d260ea8feae3addc5e2752980c..97f1b51601550e38c8c4e45bf49c4336da04bed9 100644 (file)
@@ -23,9 +23,7 @@
 
 const struct file_operations bfs_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
index 90bc079d9982928b7a9b5bcb6ad3efd6ebf1375f..fdcb4d69f430db6370e1eed7c1c04c9a3f333746 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/buffer_head.h>
 #include <linux/vfs.h>
 #include <linux/writeback.h>
+#include <linux/uio.h>
 #include <asm/uaccess.h>
 #include "bfs.h"
 
index 975266be67d319aa019a48e94cfda0a3ca8ce1e0..897ee0503932fbbf7adc88b0fd27591576696160 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/namei.h>
 #include <linux/log2.h>
 #include <linux/cleancache.h>
-#include <linux/aio.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -147,15 +146,13 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
 }
 
 static ssize_t
-blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
-                       loff_t offset)
+blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
 
-       return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter,
-                                   offset, blkdev_get_block,
-                                   NULL, NULL, 0);
+       return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset,
+                                   blkdev_get_block, NULL, NULL, 0);
 }
 
 int __sync_blockdev(struct block_device *bdev, int wait)
@@ -1598,9 +1595,22 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
+       struct inode *bd_inode = file->f_mapping->host;
+       loff_t size = i_size_read(bd_inode);
        struct blk_plug plug;
        ssize_t ret;
 
+       if (bdev_read_only(I_BDEV(bd_inode)))
+               return -EPERM;
+
+       if (!iov_iter_count(from))
+               return 0;
+
+       if (iocb->ki_pos >= size)
+               return -ENOSPC;
+
+       iov_iter_truncate(from, size - iocb->ki_pos);
+
        blk_start_plug(&plug);
        ret = __generic_file_write_iter(iocb, from);
        if (ret > 0) {
@@ -1660,8 +1670,6 @@ const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = block_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .mmap           = generic_file_mmap,
index 30982bbd31c30c2b154836b0f51b94c37e22923c..691a84a81e09149838c0a1b6b43006d70839a670 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/string.h>
 #include <linux/backing-dev.h>
 #include <linux/mpage.h>
-#include <linux/aio.h>
 #include <linux/falloc.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
@@ -32,6 +31,7 @@
 #include <linux/compat.h>
 #include <linux/slab.h>
 #include <linux/btrfs.h>
+#include <linux/uio.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -1747,7 +1747,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        mutex_lock(&inode->i_mutex);
 
        current->backing_dev_info = inode_to_bdi(inode);
-       err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+       err = generic_write_checks(file, &pos, &count);
        if (err) {
                mutex_unlock(&inode->i_mutex);
                goto out;
@@ -2806,8 +2806,6 @@ out:
 
 const struct file_operations btrfs_file_operations = {
        .llseek         = btrfs_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .splice_read    = generic_file_splice_read,
        .write_iter     = btrfs_file_write_iter,
index d2e732d7af524640bc2c197da3e7123182b4537e..43192e10cc4331f9f533c61f10f21c602bdee806 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/writeback.h>
 #include <linux/statfs.h>
 #include <linux/compat.h>
-#include <linux/aio.h>
 #include <linux/bit_spinlock.h>
 #include <linux/xattr.h>
 #include <linux/posix_acl.h>
@@ -43,6 +42,7 @@
 #include <linux/btrfs.h>
 #include <linux/blkdev.h>
 #include <linux/posix_acl_xattr.h>
+#include <linux/uio.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -8081,7 +8081,7 @@ free_ordered:
        bio_endio(dio_bio, ret);
 }
 
-static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
+static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
                        const struct iov_iter *iter, loff_t offset)
 {
        int seg;
@@ -8096,7 +8096,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
                goto out;
 
        /* If this is a write we don't need to check anymore */
-       if (rw & WRITE)
+       if (iov_iter_rw(iter) == WRITE)
                return 0;
        /*
         * Check to make sure we don't have duplicate iov_base's in this
@@ -8114,8 +8114,8 @@ out:
        return retval;
 }
 
-static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
-                       struct iov_iter *iter, loff_t offset)
+static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                              loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -8126,7 +8126,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
        bool relock = false;
        ssize_t ret;
 
-       if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset))
+       if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
                return 0;
 
        atomic_inc(&inode->i_dio_count);
@@ -8144,7 +8144,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                filemap_fdatawrite_range(inode->i_mapping, offset,
                                         offset + count - 1);
 
-       if (rw & WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                /*
                 * If the write DIO is beyond the EOF, we need update
                 * the isize, but it is protected by i_mutex. So we can
@@ -8174,11 +8174,11 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                wakeup = false;
        }
 
-       ret = __blockdev_direct_IO(rw, iocb, inode,
-                       BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
-                       iter, offset, btrfs_get_blocks_direct, NULL,
-                       btrfs_submit_direct, flags);
-       if (rw & WRITE) {
+       ret = __blockdev_direct_IO(iocb, inode,
+                                  BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
+                                  iter, offset, btrfs_get_blocks_direct, NULL,
+                                  btrfs_submit_direct, flags);
+       if (iov_iter_rw(iter) == WRITE) {
                current->journal_info = NULL;
                if (ret < 0 && ret != -EIOCBQUEUED)
                        btrfs_delalloc_release_space(inode, count);
index fd5599d323620a2c5617ea5355e2e1320d6a0954..155ab9c0246b202aed75e3db02313369fab52976 100644 (file)
@@ -1198,8 +1198,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
  * intercept O_DIRECT reads and writes early, this function should
  * never get called.
  */
-static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
-                             struct iov_iter *iter,
+static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter,
                              loff_t pos)
 {
        WARN_ON(1);
index d533075a823d5eb92e709547b8fe790c59cba981..761841903160223674df8b2a63464e39f9220988 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/writeback.h>
-#include <linux/aio.h>
 #include <linux/falloc.h>
 
 #include "super.h"
@@ -808,7 +807,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
-       size_t len = iocb->ki_nbytes;
+       size_t len = iov_iter_count(to);
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct page *pinned_page = NULL;
@@ -954,7 +953,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);
 
-       err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+       err = generic_write_checks(file, &pos, &count);
        if (err)
                goto out;
 
@@ -1332,8 +1331,6 @@ const struct file_operations ceph_file_fops = {
        .open = ceph_open,
        .release = ceph_release,
        .llseek = ceph_llseek,
-       .read = new_sync_read,
-       .write = new_sync_write,
        .read_iter = ceph_read_iter,
        .write_iter = ceph_write_iter,
        .mmap = ceph_mmap,
index 4ac7445e6ec70516848e942f54a6846a8541113b..aa0dc2573374184597b9e449fad6467f924f0f45 100644 (file)
@@ -1,6 +1,9 @@
 /*
  *   fs/cifs/cifsencrypt.c
  *
+ *   Encryption and hashing operations relating to NTLM, NTLMv2.  See MS-NLMP
+ *   for more detailed information
+ *
  *   Copyright (C) International Business Machines  Corp., 2005,2013
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
@@ -515,7 +518,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
                                 __func__);
                        return rc;
                }
-       } else if (ses->serverName) {
+       } else {
+               /* We use ses->serverName if no domain name available */
                len = strlen(ses->serverName);
 
                server = kmalloc(2 + (len * 2), GFP_KERNEL);
index d72fe37f5420e03c7677e86fe2e26d478277d1ae..eaab4b2a05953225ea92075c95dd2e0174d3d75b 100644 (file)
@@ -906,8 +906,6 @@ const struct inode_operations cifs_symlink_inode_ops = {
 };
 
 const struct file_operations cifs_file_ops = {
-       .read = new_sync_read,
-       .write = new_sync_write,
        .read_iter = cifs_loose_read_iter,
        .write_iter = cifs_file_write_iter,
        .open = cifs_open,
@@ -926,8 +924,6 @@ const struct file_operations cifs_file_ops = {
 };
 
 const struct file_operations cifs_file_strict_ops = {
-       .read = new_sync_read,
-       .write = new_sync_write,
        .read_iter = cifs_strict_readv,
        .write_iter = cifs_strict_writev,
        .open = cifs_open,
@@ -947,8 +943,6 @@ const struct file_operations cifs_file_strict_ops = {
 
 const struct file_operations cifs_file_direct_ops = {
        /* BB reevaluate whether they can be done with directio, no cache */
-       .read = new_sync_read,
-       .write = new_sync_write,
        .read_iter = cifs_user_readv,
        .write_iter = cifs_user_writev,
        .open = cifs_open,
@@ -967,8 +961,6 @@ const struct file_operations cifs_file_direct_ops = {
 };
 
 const struct file_operations cifs_file_nobrl_ops = {
-       .read = new_sync_read,
-       .write = new_sync_write,
        .read_iter = cifs_loose_read_iter,
        .write_iter = cifs_file_write_iter,
        .open = cifs_open,
@@ -986,8 +978,6 @@ const struct file_operations cifs_file_nobrl_ops = {
 };
 
 const struct file_operations cifs_file_strict_nobrl_ops = {
-       .read = new_sync_read,
-       .write = new_sync_write,
        .read_iter = cifs_strict_readv,
        .write_iter = cifs_strict_writev,
        .open = cifs_open,
@@ -1006,8 +996,6 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
 
 const struct file_operations cifs_file_direct_nobrl_ops = {
        /* BB reevaluate whether they can be done with directio, no cache */
-       .read = new_sync_read,
-       .write = new_sync_write,
        .read_iter = cifs_user_readv,
        .write_iter = cifs_user_writev,
        .open = cifs_open,
index d3aa999ab78520fcd4819f99548247e231df591b..480cf9c81d505b8351dd76eee0f012110c3f2b9c 100644 (file)
@@ -1599,6 +1599,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                pr_warn("CIFS: username too long\n");
                                goto cifs_parse_mount_err;
                        }
+
+                       kfree(vol->username);
                        vol->username = kstrdup(string, GFP_KERNEL);
                        if (!vol->username)
                                goto cifs_parse_mount_err;
@@ -1700,6 +1702,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                goto cifs_parse_mount_err;
                        }
 
+                       kfree(vol->domainname);
                        vol->domainname = kstrdup(string, GFP_KERNEL);
                        if (!vol->domainname) {
                                pr_warn("CIFS: no memory for domainname\n");
@@ -1731,6 +1734,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        }
 
                         if (strncasecmp(string, "default", 7) != 0) {
+                               kfree(vol->iocharset);
                                vol->iocharset = kstrdup(string,
                                                         GFP_KERNEL);
                                if (!vol->iocharset) {
@@ -2913,8 +2917,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
                 * calling name ends in null (byte 16) from old smb
                 * convention.
                 */
-               if (server->workstation_RFC1001_name &&
-                   server->workstation_RFC1001_name[0] != 0)
+               if (server->workstation_RFC1001_name[0] != 0)
                        rfc1002mangle(ses_init_buf->trailer.
                                      session_req.calling_name,
                                      server->workstation_RFC1001_name,
@@ -3692,6 +3695,12 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
 #endif /* CIFS_WEAK_PW_HASH */
                rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
                                        bcc_ptr, nls_codepage);
+               if (rc) {
+                       cifs_dbg(FYI, "%s Can't generate NTLM rsp. Error: %d\n",
+                                __func__, rc);
+                       cifs_buf_release(smb_buffer);
+                       return rc;
+               }
 
                bcc_ptr += CIFS_AUTH_RESP_SIZE;
                if (ses->capabilities & CAP_UNICODE) {
index a94b3e67318283dd54d61fc595ecb2037ba3a515..4202e74b2db5cdf15d878add2e8f2a95e9e52b7e 100644 (file)
@@ -1823,6 +1823,7 @@ refind_writable:
                        cifsFileInfo_put(inv_file);
                        spin_lock(&cifs_file_list_lock);
                        ++refind;
+                       inv_file = NULL;
                        goto refind_writable;
                }
        }
@@ -2559,9 +2560,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
        return rc;
 }
 
-static ssize_t
-cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
+ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct file *file = iocb->ki_filp;
        size_t len;
        ssize_t total_written = 0;
        struct cifsFileInfo *open_file;
@@ -2572,8 +2573,14 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
        struct iov_iter saved_from;
        int rc;
 
+       /*
+        * BB - optimize the way when signing is disabled. We can drop this
+        * extra memory-to-memory copying and use iovec buffers for constructing
+        * write request.
+        */
+
        len = iov_iter_count(from);
-       rc = generic_write_checks(file, poffset, &len, 0);
+       rc = generic_write_checks(file, &iocb->ki_pos, &len);
        if (rc)
                return rc;
 
@@ -2592,7 +2599,7 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
 
        memcpy(&saved_from, from, sizeof(struct iov_iter));
 
-       rc = cifs_write_from_iter(*poffset, len, from, open_file, cifs_sb,
+       rc = cifs_write_from_iter(iocb->ki_pos, len, from, open_file, cifs_sb,
                                  &wdata_list);
 
        /*
@@ -2632,7 +2639,7 @@ restart_loop:
                                memcpy(&tmp_from, &saved_from,
                                       sizeof(struct iov_iter));
                                iov_iter_advance(&tmp_from,
-                                                wdata->offset - *poffset);
+                                                wdata->offset - iocb->ki_pos);
 
                                rc = cifs_write_from_iter(wdata->offset,
                                                wdata->bytes, &tmp_from,
@@ -2649,34 +2656,13 @@ restart_loop:
                kref_put(&wdata->refcount, cifs_uncached_writedata_release);
        }
 
-       if (total_written > 0)
-               *poffset += total_written;
+       if (unlikely(!total_written))
+               return rc;
 
+       iocb->ki_pos += total_written;
+       set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
        cifs_stats_bytes_written(tcon, total_written);
-       return total_written ? total_written : (ssize_t)rc;
-}
-
-ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
-{
-       ssize_t written;
-       struct inode *inode;
-       loff_t pos = iocb->ki_pos;
-
-       inode = file_inode(iocb->ki_filp);
-
-       /*
-        * BB - optimize the way when signing is disabled. We can drop this
-        * extra memory-to-memory copying and use iovec buffers for constructing
-        * write request.
-        */
-
-       written = cifs_iovec_write(iocb->ki_filp, from, &pos);
-       if (written > 0) {
-               set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags);
-               iocb->ki_pos = pos;
-       }
-
-       return written;
+       return total_written;
 }
 
 static ssize_t
@@ -2687,8 +2673,8 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
        struct inode *inode = file->f_mapping->host;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
-       ssize_t rc = -EACCES;
-       loff_t lock_pos = iocb->ki_pos;
+       ssize_t rc;
+       size_t count;
 
        /*
         * We need to hold the sem to be sure nobody modifies lock list
@@ -2696,23 +2682,30 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
         */
        down_read(&cinode->lock_sem);
        mutex_lock(&inode->i_mutex);
-       if (file->f_flags & O_APPEND)
-               lock_pos = i_size_read(inode);
-       if (!cifs_find_lock_conflict(cfile, lock_pos, iov_iter_count(from),
+
+       count = iov_iter_count(from);
+       rc = generic_write_checks(file, &iocb->ki_pos, &count);
+       if (rc)
+               goto out;
+
+       if (count == 0)
+               goto out;
+
+       iov_iter_truncate(from, count);
+
+       if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
                                     server->vals->exclusive_lock_type, NULL,
-                                    CIFS_WRITE_OP)) {
+                                    CIFS_WRITE_OP))
                rc = __generic_file_write_iter(iocb, from);
-               mutex_unlock(&inode->i_mutex);
-
-               if (rc > 0) {
-                       ssize_t err;
+       else
+               rc = -EACCES;
+out:
+       mutex_unlock(&inode->i_mutex);
 
-                       err = generic_write_sync(file, iocb->ki_pos - rc, rc);
-                       if (err < 0)
-                               rc = err;
-               }
-       } else {
-               mutex_unlock(&inode->i_mutex);
+       if (rc > 0) {
+               ssize_t err = generic_write_sync(file, iocb->ki_pos - rc, rc);
+               if (err < 0)
+                       rc = err;
        }
        up_read(&cinode->lock_sem);
        return rc;
@@ -3876,8 +3869,7 @@ void cifs_oplock_break(struct work_struct *work)
  * Direct IO is not yet supported in the cached mode. 
  */
 static ssize_t
-cifs_direct_io(int rw, struct kiocb *iocb, struct iov_iter *iter,
-               loff_t pos)
+cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
         /*
          * FIXME
index 2d4f37235ed0fa360782ae237c89fccccbf8b719..3e126d7bb2ea5bec97c9d6e02973a49886261580 100644 (file)
@@ -771,6 +771,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                                cifs_buf_release(srchinf->ntwrk_buf_start);
                        }
                        kfree(srchinf);
+                       if (rc)
+                               goto cgii_exit;
        } else
                goto cgii_exit;
 
index 689f035915cf70f075d71fca5e281ec009c5420a..22dfdf17d06547f3d1b3abbc302bb03abf1b047b 100644 (file)
@@ -322,7 +322,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
 
        /* return pointer to beginning of data area, ie offset from SMB start */
        if ((*off != 0) && (*len != 0))
-               return hdr->ProtocolId + *off;
+               return (char *)(&hdr->ProtocolId[0]) + *off;
        else
                return NULL;
 }
index 96b5d40a2ece611b27ed19668cc4b7b665605113..eab05e1aa587424863d6914eb351da9fdcf17437 100644 (file)
@@ -684,7 +684,8 @@ smb2_clone_range(const unsigned int xid,
 
                        /* No need to change MaxChunks since already set to 1 */
                        chunk_sizes_updated = true;
-               }
+               } else
+                       goto cchunk_out;
        }
 
 cchunk_out:
index 3417340bf89e677fe0c46bf98cf922dd39d29a3a..65cd7a84c8bc3206033a917fe9d98fc939cbe1af 100644 (file)
@@ -1218,7 +1218,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        struct smb2_ioctl_req *req;
        struct smb2_ioctl_rsp *rsp;
        struct TCP_Server_Info *server;
-       struct cifs_ses *ses = tcon->ses;
+       struct cifs_ses *ses;
        struct kvec iov[2];
        int resp_buftype;
        int num_iovecs;
@@ -1233,6 +1233,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (plen)
                *plen = 0;
 
+       if (tcon)
+               ses = tcon->ses;
+       else
+               return -EIO;
+
        if (ses && (ses->server))
                server = ses->server;
        else
@@ -1296,14 +1301,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
 
        if ((rc != 0) && (rc != -EINVAL)) {
-               if (tcon)
-                       cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+               cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
                goto ioctl_exit;
        } else if (rc == -EINVAL) {
                if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
                    (opcode != FSCTL_SRV_COPYCHUNK)) {
-                       if (tcon)
-                               cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+                       cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
                        goto ioctl_exit;
                }
        }
@@ -1629,7 +1632,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
 
        rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
 
-       if ((rc != 0) && tcon)
+       if (rc != 0)
                cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
 
        free_rsp_buf(resp_buftype, iov[0].iov_base);
@@ -2114,7 +2117,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        struct kvec iov[2];
        int rc = 0;
        int len;
-       int resp_buftype;
+       int resp_buftype = CIFS_NO_BUFFER;
        unsigned char *bufptr;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses = tcon->ses;
index d244d743a23261c97551d4ee00a30399a2f30b68..1da3805f3ddcdb209a6b357e12aacac35809dda9 100644 (file)
 #include "coda_int.h"
 
 static ssize_t
-coda_file_read(struct file *coda_file, char __user *buf, size_t count, loff_t *ppos)
+coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-       struct coda_file_info *cfi;
-       struct file *host_file;
+       struct file *coda_file = iocb->ki_filp;
+       struct coda_file_info *cfi = CODA_FTOC(coda_file);
 
-       cfi = CODA_FTOC(coda_file);
        BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
-       host_file = cfi->cfi_container;
 
-       if (!host_file->f_op->read)
-               return -EINVAL;
-
-       return host_file->f_op->read(host_file, buf, count, ppos);
+       return vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos);
 }
 
 static ssize_t
@@ -64,32 +59,25 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
 }
 
 static ssize_t
-coda_file_write(struct file *coda_file, const char __user *buf, size_t count, loff_t *ppos)
+coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-       struct inode *host_inode, *coda_inode = file_inode(coda_file);
-       struct coda_file_info *cfi;
+       struct file *coda_file = iocb->ki_filp;
+       struct inode *coda_inode = file_inode(coda_file);
+       struct coda_file_info *cfi = CODA_FTOC(coda_file);
        struct file *host_file;
        ssize_t ret;
 
-       cfi = CODA_FTOC(coda_file);
        BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
-       host_file = cfi->cfi_container;
-
-       if (!host_file->f_op->write)
-               return -EINVAL;
 
-       host_inode = file_inode(host_file);
+       host_file = cfi->cfi_container;
        file_start_write(host_file);
        mutex_lock(&coda_inode->i_mutex);
-
-       ret = host_file->f_op->write(host_file, buf, count, ppos);
-
-       coda_inode->i_size = host_inode->i_size;
+       ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos);
+       coda_inode->i_size = file_inode(host_file)->i_size;
        coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
        coda_inode->i_mtime = coda_inode->i_ctime = CURRENT_TIME_SEC;
        mutex_unlock(&coda_inode->i_mutex);
        file_end_write(host_file);
-
        return ret;
 }
 
@@ -231,8 +219,8 @@ int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
 
 const struct file_operations coda_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = coda_file_read,
-       .write          = coda_file_write,
+       .read_iter      = coda_file_read_iter,
+       .write_iter     = coda_file_write_iter,
        .mmap           = coda_file_mmap,
        .open           = coda_open,
        .release        = coda_release,
index afec6450450ff08e6be4c1cd7a05dbc293ef0a76..6b8e2f091f5b8fd71d63d545ea5fe9ae593d7688 100644 (file)
@@ -570,6 +570,7 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, void __user *argp)
 #define BNEPCONNDEL    _IOW('B', 201, int)
 #define BNEPGETCONNLIST        _IOR('B', 210, int)
 #define BNEPGETCONNINFO        _IOR('B', 211, int)
+#define BNEPGETSUPPFEAT        _IOR('B', 212, int)
 
 #define CMTPCONNADD    _IOW('C', 200, int)
 #define CMTPCONNDEL    _IOW('C', 201, int)
@@ -1247,6 +1248,7 @@ COMPATIBLE_IOCTL(BNEPCONNADD)
 COMPATIBLE_IOCTL(BNEPCONNDEL)
 COMPATIBLE_IOCTL(BNEPGETCONNLIST)
 COMPATIBLE_IOCTL(BNEPGETCONNINFO)
+COMPATIBLE_IOCTL(BNEPGETSUPPFEAT)
 COMPATIBLE_IOCTL(CMTPCONNADD)
 COMPATIBLE_IOCTL(CMTPCONNDEL)
 COMPATIBLE_IOCTL(CMTPGETCONNLIST)
index f319926ddf8cbc5cc90a003628bf500701ce3c25..bbbe139ab28020244f62011f44869d1529cd4b05 100644 (file)
@@ -657,7 +657,7 @@ void do_coredump(const siginfo_t *siginfo)
                 */
                if (!uid_eq(inode->i_uid, current_fsuid()))
                        goto close_fail;
-               if (!cprm.file->f_op->write)
+               if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
                        goto close_fail;
                if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
                        goto close_fail;
index ed1619ec6537c25707ea801e772571f3c2fcaf5d..a27846946525ed48b87071743f21ae744dacac36 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -98,9 +98,9 @@ static bool buffer_size_valid(struct buffer_head *bh)
        return bh->b_state != 0;
 }
 
-static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
-                       loff_t start, loff_t end, get_block_t get_block,
-                       struct buffer_head *bh)
+static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
+                     loff_t start, loff_t end, get_block_t get_block,
+                     struct buffer_head *bh)
 {
        ssize_t retval = 0;
        loff_t pos = start;
@@ -109,7 +109,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
        void *addr;
        bool hole = false;
 
-       if (rw != WRITE)
+       if (iov_iter_rw(iter) != WRITE)
                end = min(end, i_size_read(inode));
 
        while (pos < end) {
@@ -124,7 +124,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
                                bh->b_size = PAGE_ALIGN(end - pos);
                                bh->b_state = 0;
                                retval = get_block(inode, block, bh,
-                                                               rw == WRITE);
+                                                  iov_iter_rw(iter) == WRITE);
                                if (retval)
                                        break;
                                if (!buffer_size_valid(bh))
@@ -137,7 +137,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
                                bh->b_size -= done;
                        }
 
-                       hole = (rw != WRITE) && !buffer_written(bh);
+                       hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
                        if (hole) {
                                addr = NULL;
                                size = bh->b_size - first;
@@ -154,7 +154,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
                        max = min(pos + size, end);
                }
 
-               if (rw == WRITE)
+               if (iov_iter_rw(iter) == WRITE)
                        len = copy_from_iter(addr, max - pos, iter);
                else if (!hole)
                        len = copy_to_iter(addr, max - pos, iter);
@@ -173,7 +173,6 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
 
 /**
  * dax_do_io - Perform I/O to a DAX file
- * @rw: READ to read or WRITE to write
  * @iocb: The control block for this I/O
  * @inode: The file which the I/O is directed at
  * @iter: The addresses to do I/O from or to
@@ -189,9 +188,9 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
  * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
  * is in progress.
  */
-ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
-                       struct iov_iter *iter, loff_t pos,
-                       get_block_t get_block, dio_iodone_t end_io, int flags)
+ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
+                 struct iov_iter *iter, loff_t pos, get_block_t get_block,
+                 dio_iodone_t end_io, int flags)
 {
        struct buffer_head bh;
        ssize_t retval = -EINVAL;
@@ -199,7 +198,7 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
 
        memset(&bh, 0, sizeof(bh));
 
-       if ((flags & DIO_LOCKING) && (rw == READ)) {
+       if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
                struct address_space *mapping = inode->i_mapping;
                mutex_lock(&inode->i_mutex);
                retval = filemap_write_and_wait_range(mapping, pos, end - 1);
@@ -212,9 +211,9 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
        /* Protects against truncate */
        atomic_inc(&inode->i_dio_count);
 
-       retval = dax_io(rw, inode, iter, pos, end, get_block, &bh);
+       retval = dax_io(inode, iter, pos, end, get_block, &bh);
 
-       if ((flags & DIO_LOCKING) && (rw == READ))
+       if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                mutex_unlock(&inode->i_mutex);
 
        if ((retval > 0) && end_io)
index c71e3732e53bcebbffca749e65b7095fd4ff6e7e..d99736a63e3cf6d5da6850e4eee02ecd7ae672e4 100644 (file)
@@ -2690,7 +2690,7 @@ static int __d_unalias(struct inode *inode,
                struct dentry *dentry, struct dentry *alias)
 {
        struct mutex *m1 = NULL, *m2 = NULL;
-       int ret = -EBUSY;
+       int ret = -ESTALE;
 
        /* If alias and dentry share a parent, then no extra locks required */
        if (alias->d_parent == dentry->d_parent)
index e181b6b2e297fb5d3bd03a07efe382f0dd204972..c3b560b24a463c50569a2a37c5d04f8d5af92c4d 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/uio.h>
 #include <linux/atomic.h>
 #include <linux/prefetch.h>
-#include <linux/aio.h>
 
 /*
  * How many user pages to map in one call to get_user_pages().  This determines
@@ -265,7 +264,7 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
                                ret = err;
                }
 
-               aio_complete(dio->iocb, ret, 0);
+               dio->iocb->ki_complete(dio->iocb, ret, 0);
        }
 
        kmem_cache_free(dio_cache, dio);
@@ -1056,7 +1055,7 @@ static inline int drop_refcount(struct dio *dio)
         * operation.  AIO can if it was a broken operation described above or
         * in fact if all the bios race to complete before we get here.  In
         * that case dio_complete() translates the EIOCBQUEUED into the proper
-        * return code that the caller will hand to aio_complete().
+        * return code that the caller will hand to ->complete().
         *
         * This is managed by the bio_lock instead of being an atomic_t so that
         * completion paths can drop their ref and use the remaining count to
@@ -1094,10 +1093,10 @@ static inline int drop_refcount(struct dio *dio)
  * for the whole file.
  */
 static inline ssize_t
-do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, struct iov_iter *iter, loff_t offset, 
-       get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags)
+do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+                     struct block_device *bdev, struct iov_iter *iter,
+                     loff_t offset, get_block_t get_block, dio_iodone_t end_io,
+                     dio_submit_t submit_io, int flags)
 {
        unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
        unsigned blkbits = i_blkbits;
@@ -1111,9 +1110,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        struct blk_plug plug;
        unsigned long align = offset | iov_iter_alignment(iter);
 
-       if (rw & WRITE)
-               rw = WRITE_ODIRECT;
-
        /*
         * Avoid references to bdev if not absolutely needed to give
         * the early prefetch in the caller enough time.
@@ -1128,7 +1124,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        }
 
        /* watch out for a 0 len io from a tricksy fs */
-       if (rw == READ && !iov_iter_count(iter))
+       if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
                return 0;
 
        dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
@@ -1144,7 +1140,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 
        dio->flags = flags;
        if (dio->flags & DIO_LOCKING) {
-               if (rw == READ) {
+               if (iov_iter_rw(iter) == READ) {
                        struct address_space *mapping =
                                        iocb->ki_filp->f_mapping;
 
@@ -1170,19 +1166,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        if (is_sync_kiocb(iocb))
                dio->is_async = false;
        else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
-            (rw & WRITE) && end > i_size_read(inode))
+                iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
                dio->is_async = false;
        else
                dio->is_async = true;
 
        dio->inode = inode;
-       dio->rw = rw;
+       dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
 
        /*
         * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
         * so that we can call ->fsync.
         */
-       if (dio->is_async && (rw & WRITE) &&
+       if (dio->is_async && iov_iter_rw(iter) == WRITE &&
            ((iocb->ki_filp->f_flags & O_DSYNC) ||
             IS_SYNC(iocb->ki_filp->f_mapping->host))) {
                retval = dio_set_defer_completion(dio);
@@ -1275,7 +1271,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         * we can let i_mutex go now that its achieved its purpose
         * of protecting us from looking up uninitialized blocks.
         */
-       if (rw == READ && (dio->flags & DIO_LOCKING))
+       if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
                mutex_unlock(&dio->inode->i_mutex);
 
        /*
@@ -1287,7 +1283,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         */
        BUG_ON(retval == -EIOCBQUEUED);
        if (dio->is_async && retval == 0 && dio->result &&
-           (rw == READ || dio->result == count))
+           (iov_iter_rw(iter) == READ || dio->result == count))
                retval = -EIOCBQUEUED;
        else
                dio_await_completion(dio);
@@ -1301,11 +1297,11 @@ out:
        return retval;
 }
 
-ssize_t
-__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-       get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags)
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+                            struct block_device *bdev, struct iov_iter *iter,
+                            loff_t offset, get_block_t get_block,
+                            dio_iodone_t end_io, dio_submit_t submit_io,
+                            int flags)
 {
        /*
         * The block device state is needed in the end to finally
@@ -1319,8 +1315,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        prefetch(bdev->bd_queue);
        prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
 
-       return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
-                                    get_block, end_io, submit_io, flags);
+       return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
+                                    end_io, submit_io, flags);
 }
 
 EXPORT_SYMBOL(__blockdev_direct_IO);
index fd39bad6f1bdf8bbcb4321a8fc8ff1934d67167c..a65786e26b05ac97dbaab6a6258e533aa842cece 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/security.h>
 #include <linux/compat.h>
 #include <linux/fs_stack.h>
-#include <linux/aio.h>
 #include "ecryptfs_kernel.h"
 
 /**
@@ -52,12 +51,6 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
        struct file *file = iocb->ki_filp;
 
        rc = generic_file_read_iter(iocb, to);
-       /*
-        * Even though this is a async interface, we need to wait
-        * for IO to finish to update atime
-        */
-       if (-EIOCBQUEUED == rc)
-               rc = wait_on_sync_kiocb(iocb);
        if (rc >= 0) {
                path = ecryptfs_dentry_to_lower_path(file->f_path.dentry);
                touch_atime(path);
@@ -365,9 +358,7 @@ const struct file_operations ecryptfs_dir_fops = {
 
 const struct file_operations ecryptfs_main_fops = {
        .llseek = generic_file_llseek,
-       .read = new_sync_read,
        .read_iter = ecryptfs_read_update_atime,
-       .write = new_sync_write,
        .write_iter = generic_file_write_iter,
        .iterate = ecryptfs_readdir,
        .unlocked_ioctl = ecryptfs_unlocked_ioctl,
index 1a376b42d30582c81a9f4d0b3f4dc85b74968cb0..906de66e8e7e067e6179aa3408c6e6aa8ea528b7 100644 (file)
@@ -67,8 +67,6 @@ static int exofs_flush(struct file *file, fl_owner_t id)
 
 const struct file_operations exofs_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
index a198e94813fec42378c75c568b013f360c5ea1e2..35073aaec6e08895f06ce3f7db86820d7dc982ec 100644 (file)
@@ -963,8 +963,8 @@ static void exofs_invalidatepage(struct page *page, unsigned int offset,
 
 
  /* TODO: Should be easy enough to do proprly */
-static ssize_t exofs_direct_IO(int rw, struct kiocb *iocb,
-               struct iov_iter *iter, loff_t offset)
+static ssize_t exofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                              loff_t offset)
 {
        return 0;
 }
index e31701713516c7f24dee6847badf0e4098b947c8..ef04fdb57dbf823ef6a2dfa9ad46d72727388d50 100644 (file)
@@ -92,8 +92,6 @@ int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
  */
 const struct file_operations ext2_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = ext2_ioctl,
@@ -111,8 +109,6 @@ const struct file_operations ext2_file_operations = {
 #ifdef CONFIG_FS_DAX
 const struct file_operations ext2_dax_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = ext2_ioctl,
index 6434bc00012517a30ace1cb97f2160b0c48eea3a..e1abf75e994cb9b63c22a64591149b0ed39164f4 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/mpage.h>
 #include <linux/fiemap.h>
 #include <linux/namei.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 #include "ext2.h"
 #include "acl.h"
 #include "xattr.h"
@@ -851,8 +851,7 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
 }
 
 static ssize_t
-ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
-                       loff_t offset)
+ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -861,12 +860,12 @@ ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
        ssize_t ret;
 
        if (IS_DAX(inode))
-               ret = dax_do_io(rw, iocb, inode, iter, offset, ext2_get_block,
-                               NULL, DIO_LOCKING);
+               ret = dax_do_io(iocb, inode, iter, offset, ext2_get_block, NULL,
+                               DIO_LOCKING);
        else
-               ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
+               ret = blockdev_direct_IO(iocb, inode, iter, offset,
                                         ext2_get_block);
-       if (ret < 0 && (rw & WRITE))
+       if (ret < 0 && iov_iter_rw(iter) == WRITE)
                ext2_write_failed(mapping, offset + count);
        return ret;
 }
index a062fa1e1b113e56e7545bb4c6bac7ac1bf7d741..3b8f650de22ce2b917d97d3c0b22717726d593fa 100644 (file)
@@ -50,8 +50,6 @@ static int ext3_release_file (struct inode * inode, struct file * filp)
 
 const struct file_operations ext3_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = ext3_ioctl,
index 2c6ccc49ba279cacf77fe6609fe44a50b970898c..13c0868c7160ee572f30601939fcd6f421a01f11 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/writeback.h>
 #include <linux/mpage.h>
 #include <linux/namei.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 #include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
@@ -1820,8 +1820,8 @@ static int ext3_releasepage(struct page *page, gfp_t wait)
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
-                       struct iov_iter *iter, loff_t offset)
+static ssize_t ext3_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -1832,9 +1832,9 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        int retries = 0;
 
-       trace_ext3_direct_IO_enter(inode, offset, count, rw);
+       trace_ext3_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
-       if (rw == WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                loff_t final_size = offset + count;
 
                if (final_size > inode->i_size) {
@@ -1856,12 +1856,12 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retry:
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext3_get_block);
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, ext3_get_block);
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && ret < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
@@ -1908,7 +1908,7 @@ retry:
                        ret = err;
        }
 out:
-       trace_ext3_direct_IO_exit(inode, offset, count, rw, ret);
+       trace_ext3_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
        return ret;
 }
 
index f63c3d5805c4c156ad3ed412cbecf85e700cf9d2..2031c994024ed0e872176113f8becadb562c82e2 100644 (file)
@@ -2152,8 +2152,8 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
 /* indirect.c */
 extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                                struct ext4_map_blocks *map, int flags);
-extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                               struct iov_iter *iter, loff_t offset);
+extern ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                                 loff_t offset);
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
index 33a09da16c9ce1e8049fdcacdf3e8833410fd78f..42b1fa33a17a5c6c26169b67955ded4e31efba8d 100644 (file)
@@ -23,9 +23,9 @@
 #include <linux/jbd2.h>
 #include <linux/mount.h>
 #include <linux/path.h>
-#include <linux/aio.h>
 #include <linux/quotaops.h>
 #include <linux/pagevec.h>
+#include <linux/uio.h>
 #include "ext4.h"
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -99,7 +99,7 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        int overwrite = 0;
        size_t length = iov_iter_count(from);
        ssize_t ret;
-       loff_t pos = iocb->ki_pos;
+       loff_t pos;
 
        /*
         * Unaligned direct AIO must be serialized; see comment above
@@ -109,15 +109,22 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
            ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            (file->f_flags & O_APPEND ||
-            ext4_unaligned_aio(inode, from, pos))) {
+            ext4_unaligned_aio(inode, from, iocb->ki_pos))) {
                aio_mutex = ext4_aio_mutex(inode);
                mutex_lock(aio_mutex);
                ext4_unwritten_wait(inode);
        }
 
        mutex_lock(&inode->i_mutex);
-       if (file->f_flags & O_APPEND)
-               iocb->ki_pos = pos = i_size_read(inode);
+       ret = generic_write_checks(file, &iocb->ki_pos, &length);
+       if (ret)
+               goto out;
+
+       if (length == 0)
+               goto out;
+
+       iov_iter_truncate(from, length);
+       pos = iocb->ki_pos;
 
        /*
         * If we have encountered a bitmap-format file, the size limit
@@ -126,19 +133,16 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 
-               if ((pos > sbi->s_bitmap_maxbytes) ||
-                   (pos == sbi->s_bitmap_maxbytes && length > 0)) {
-                       mutex_unlock(&inode->i_mutex);
+               if (pos >= sbi->s_bitmap_maxbytes) {
                        ret = -EFBIG;
-                       goto errout;
+                       goto out;
                }
-
-               if (pos + length > sbi->s_bitmap_maxbytes)
-                       iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
+               iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
        }
 
        iocb->private = &overwrite;
        if (o_direct) {
+               length = iov_iter_count(from);
                blk_start_plug(&plug);
 
 
@@ -185,7 +189,12 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (o_direct)
                blk_finish_plug(&plug);
 
-errout:
+       if (aio_mutex)
+               mutex_unlock(aio_mutex);
+       return ret;
+
+out:
+       mutex_unlock(&inode->i_mutex);
        if (aio_mutex)
                mutex_unlock(aio_mutex);
        return ret;
@@ -607,8 +616,6 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
 
 const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
@@ -627,8 +634,6 @@ const struct file_operations ext4_file_operations = {
 #ifdef CONFIG_FS_DAX
 const struct file_operations ext4_dax_file_operations = {
        .llseek         = ext4_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
index 45fe924f82bce2ff76e3e74b45ec1833729433ea..3580629e42d32aadaff81effd01fddb2aaa41484 100644 (file)
@@ -20,9 +20,9 @@
  *     (sct@redhat.com), 1993, 1998
  */
 
-#include <linux/aio.h>
 #include "ext4_jbd2.h"
 #include "truncate.h"
+#include <linux/uio.h>
 
 #include <trace/events/ext4.h>
 
@@ -642,8 +642,8 @@ out:
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                          struct iov_iter *iter, loff_t offset)
+ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                          loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -654,7 +654,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        int retries = 0;
 
-       if (rw == WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                loff_t final_size = offset + count;
 
                if (final_size > inode->i_size) {
@@ -676,7 +676,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retry:
-       if (rw == READ && ext4_should_dioread_nolock(inode)) {
+       if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) {
                /*
                 * Nolock dioread optimization may be dynamically disabled
                 * via ext4_inode_block_unlocked_dio(). Check inode's state
@@ -690,23 +690,24 @@ retry:
                        goto locked;
                }
                if (IS_DAX(inode))
-                       ret = dax_do_io(rw, iocb, inode, iter, offset,
+                       ret = dax_do_io(iocb, inode, iter, offset,
                                        ext4_get_block, NULL, 0);
                else
-                       ret = __blockdev_direct_IO(rw, iocb, inode,
-                                       inode->i_sb->s_bdev, iter, offset,
-                                       ext4_get_block, NULL, NULL, 0);
+                       ret = __blockdev_direct_IO(iocb, inode,
+                                                  inode->i_sb->s_bdev, iter,
+                                                  offset, ext4_get_block, NULL,
+                                                  NULL, 0);
                inode_dio_done(inode);
        } else {
 locked:
                if (IS_DAX(inode))
-                       ret = dax_do_io(rw, iocb, inode, iter, offset,
+                       ret = dax_do_io(iocb, inode, iter, offset,
                                        ext4_get_block, NULL, DIO_LOCKING);
                else
-                       ret = blockdev_direct_IO(rw, iocb, inode, iter,
-                                       offset, ext4_get_block);
+                       ret = blockdev_direct_IO(iocb, inode, iter, offset,
+                                                ext4_get_block);
 
-               if (unlikely((rw & WRITE) && ret < 0)) {
+               if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                        loff_t isize = i_size_read(inode);
                        loff_t end = offset + count;
 
index 5cb9a212b86f3efd69ca604df07dc20b901dabb1..42c942a950e15d7a7ac819ce9859230111bbf407 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
-#include <linux/aio.h>
 #include <linux/bitops.h>
 
 #include "ext4_jbd2.h"
@@ -2953,8 +2952,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
  * if the machine crashes during the write.
  *
  */
-static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
-                             struct iov_iter *iter, loff_t offset)
+static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                                 loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -2967,8 +2966,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        ext4_io_end_t *io_end = NULL;
 
        /* Use the old path for reads and writes beyond i_size. */
-       if (rw != WRITE || final_size > inode->i_size)
-               return ext4_ind_direct_IO(rw, iocb, iter, offset);
+       if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size)
+               return ext4_ind_direct_IO(iocb, iter, offset);
 
        BUG_ON(iocb->private == NULL);
 
@@ -2977,7 +2976,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
         * conversion. This also disallows race between truncate() and
         * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
         */
-       if (rw == WRITE)
+       if (iov_iter_rw(iter) == WRITE)
                atomic_inc(&inode->i_dio_count);
 
        /* If we do a overwrite dio, i_mutex locking can be released */
@@ -3035,10 +3034,10 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                dio_flags = DIO_LOCKING;
        }
        if (IS_DAX(inode))
-               ret = dax_do_io(rw, iocb, inode, iter, offset, get_block_func,
+               ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
                                ext4_end_io_dio, dio_flags);
        else
-               ret = __blockdev_direct_IO(rw, iocb, inode,
+               ret = __blockdev_direct_IO(iocb, inode,
                                           inode->i_sb->s_bdev, iter, offset,
                                           get_block_func,
                                           ext4_end_io_dio, NULL, dio_flags);
@@ -3079,7 +3078,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retake_lock:
-       if (rw == WRITE)
+       if (iov_iter_rw(iter) == WRITE)
                inode_dio_done(inode);
        /* take i_mutex locking again if we do a ovewrite dio */
        if (overwrite) {
@@ -3090,8 +3089,8 @@ retake_lock:
        return ret;
 }
 
-static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
-                             struct iov_iter *iter, loff_t offset)
+static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -3108,12 +3107,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
        if (ext4_has_inline_data(inode))
                return 0;
 
-       trace_ext4_direct_IO_enter(inode, offset, count, rw);
+       trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-               ret = ext4_ext_direct_IO(rw, iocb, iter, offset);
+               ret = ext4_ext_direct_IO(iocb, iter, offset);
        else
-               ret = ext4_ind_direct_IO(rw, iocb, iter, offset);
-       trace_ext4_direct_IO_exit(inode, offset, count, rw, ret);
+               ret = ext4_ind_direct_IO(iocb, iter, offset);
+       trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
        return ret;
 }
 
index b24a2541a9baaa0d4c22e80a75050af2517a417d..464984261e698af8317621c45b8d2089551bc790 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/pagevec.h>
 #include <linux/mpage.h>
 #include <linux/namei.h>
-#include <linux/aio.h>
 #include <linux/uio.h>
 #include <linux/bio.h>
 #include <linux/workqueue.h>
index 985ed023a750170b924455ea23e2684c50baeba4..319eda511c4ff6e4bd869aaf2e1b711ecdc6470c 100644 (file)
 #include <linux/f2fs_fs.h>
 #include <linux/buffer_head.h>
 #include <linux/mpage.h>
-#include <linux/aio.h>
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
 #include <linux/blkdev.h>
 #include <linux/bio.h>
 #include <linux/prefetch.h>
+#include <linux/uio.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -1118,12 +1118,12 @@ static int f2fs_write_end(struct file *file,
        return copied;
 }
 
-static int check_direct_IO(struct inode *inode, int rw,
-               struct iov_iter *iter, loff_t offset)
+static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
+                          loff_t offset)
 {
        unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
 
-       if (rw == READ)
+       if (iov_iter_rw(iter) == READ)
                return 0;
 
        if (offset & blocksize_mask)
@@ -1135,8 +1135,8 @@ static int check_direct_IO(struct inode *inode, int rw,
        return 0;
 }
 
-static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
-               struct iov_iter *iter, loff_t offset)
+static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -1151,19 +1151,19 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
                        return err;
        }
 
-       if (check_direct_IO(inode, rw, iter, offset))
+       if (check_direct_IO(inode, iter, offset))
                return 0;
 
-       trace_f2fs_direct_IO_enter(inode, offset, count, rw);
+       trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
-       if (rw & WRITE)
+       if (iov_iter_rw(iter) == WRITE)
                __allocate_data_blocks(inode, offset, count);
 
-       err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
-       if (err < 0 && (rw & WRITE))
+       err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
+       if (err < 0 && iov_iter_rw(iter) == WRITE)
                f2fs_write_failed(mapping, offset + count);
 
-       trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
+       trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
 
        return err;
 }
index 98dac27bc3f733975fabc96404ab30a916b20e7c..df6a0596eccf210dfefbaaca2cf2f60ca57a7f6b 100644 (file)
@@ -1104,8 +1104,6 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 const struct file_operations f2fs_file_operations = {
        .llseek         = f2fs_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .open           = generic_file_open,
index 8429c68e30578150b7dd85667a4b04f8e0d2ed4b..1e98d333879f9952c77af87b1fe1e84177e2f6ee 100644 (file)
@@ -170,8 +170,6 @@ int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 
 const struct file_operations fat_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
index 497c7c5263c7ca3962c385605fbbb558d351f759..41b729933638a128225422013417775fb0858c26 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/mpage.h>
 #include <linux/buffer_head.h>
 #include <linux/mount.h>
-#include <linux/aio.h>
 #include <linux/vfs.h>
 #include <linux/parser.h>
 #include <linux/uio.h>
@@ -246,8 +245,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
        return err;
 }
 
-static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
-                            struct iov_iter *iter,
+static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
@@ -256,7 +254,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       if (rw == WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                /*
                 * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
                 * so we need to update the ->mmu_private to block boundary.
@@ -275,8 +273,8 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
         * FAT need to use the DIO_LOCKING for avoiding the race
         * condition of fat_get_block() and ->truncate().
         */
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, fat_get_block);
-       if (ret < 0 && (rw & WRITE))
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, fat_get_block);
+       if (ret < 0 && iov_iter_rw(iter) == WRITE)
                fat_write_failed(mapping, offset + count);
 
        return ret;
index 3f85411b03ce6aac983caa0e0283e8da8848225d..294174dcc2261237fa900432c5c3a7134fda3976 100644 (file)
@@ -168,10 +168,10 @@ struct file *alloc_file(struct path *path, fmode_t mode,
        file->f_inode = path->dentry->d_inode;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        if ((mode & FMODE_READ) &&
-            likely(fop->read || fop->aio_read || fop->read_iter))
+            likely(fop->read || fop->read_iter))
                mode |= FMODE_CAN_READ;
        if ((mode & FMODE_WRITE) &&
-            likely(fop->write || fop->aio_write || fop->write_iter))
+            likely(fop->write || fop->write_iter))
                mode |= FMODE_CAN_WRITE;
        file->f_mode = mode;
        file->f_op = fop;
index 28d0c7abba1c2fa7748d3b1c2874b855427b3897..e5bbf748b6987a922fa211e3edb083346927f3b6 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/device.h>
 #include <linux/file.h>
 #include <linux/fs.h>
-#include <linux/aio.h>
 #include <linux/kdev_t.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
@@ -48,6 +47,7 @@
 #include <linux/slab.h>
 #include <linux/stat.h>
 #include <linux/module.h>
+#include <linux/uio.h>
 
 #include "fuse_i.h"
 
@@ -88,32 +88,23 @@ static struct list_head *cuse_conntbl_head(dev_t devt)
  * FUSE file.
  */
 
-static ssize_t cuse_read(struct file *file, char __user *buf, size_t count,
-                        loff_t *ppos)
+static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
 {
+       struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
        loff_t pos = 0;
-       struct iovec iov = { .iov_base = buf, .iov_len = count };
-       struct fuse_io_priv io = { .async = 0, .file = file };
-       struct iov_iter ii;
-       iov_iter_init(&ii, READ, &iov, 1, count);
 
-       return fuse_direct_io(&io, &ii, &pos, FUSE_DIO_CUSE);
+       return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE);
 }
 
-static ssize_t cuse_write(struct file *file, const char __user *buf,
-                         size_t count, loff_t *ppos)
+static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from)
 {
+       struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
        loff_t pos = 0;
-       struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
-       struct fuse_io_priv io = { .async = 0, .file = file };
-       struct iov_iter ii;
-       iov_iter_init(&ii, WRITE, &iov, 1, count);
-
        /*
         * No locking or generic_write_checks(), the server is
         * responsible for locking and sanity checks.
         */
-       return fuse_direct_io(&io, &ii, &pos,
+       return fuse_direct_io(&io, from, &pos,
                              FUSE_DIO_WRITE | FUSE_DIO_CUSE);
 }
 
@@ -186,8 +177,8 @@ static long cuse_file_compat_ioctl(struct file *file, unsigned int cmd,
 
 static const struct file_operations cuse_frontend_fops = {
        .owner                  = THIS_MODULE,
-       .read                   = cuse_read,
-       .write                  = cuse_write,
+       .read_iter              = cuse_read_iter,
+       .write_iter             = cuse_write_iter,
        .open                   = cuse_open,
        .release                = cuse_release,
        .unlocked_ioctl         = cuse_file_ioctl,
index 39706c57ad3cb157d81594065a15f154f61d7bd8..c8b68ab2e574a86f13fab97f9ed47b14a4e139d6 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/pipe_fs_i.h>
 #include <linux/swap.h>
 #include <linux/splice.h>
-#include <linux/aio.h>
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 MODULE_ALIAS("devname:fuse");
@@ -711,28 +710,26 @@ struct fuse_copy_state {
        struct fuse_conn *fc;
        int write;
        struct fuse_req *req;
-       const struct iovec *iov;
+       struct iov_iter *iter;
        struct pipe_buffer *pipebufs;
        struct pipe_buffer *currbuf;
        struct pipe_inode_info *pipe;
        unsigned long nr_segs;
-       unsigned long seglen;
-       unsigned long addr;
        struct page *pg;
        unsigned len;
        unsigned offset;
        unsigned move_pages:1;
 };
 
-static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
+static void fuse_copy_init(struct fuse_copy_state *cs,
+                          struct fuse_conn *fc,
                           int write,
-                          const struct iovec *iov, unsigned long nr_segs)
+                          struct iov_iter *iter)
 {
        memset(cs, 0, sizeof(*cs));
        cs->fc = fc;
        cs->write = write;
-       cs->iov = iov;
-       cs->nr_segs = nr_segs;
+       cs->iter = iter;
 }
 
 /* Unmap and put previous page of userspace buffer */
@@ -800,22 +797,16 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                        cs->nr_segs++;
                }
        } else {
-               if (!cs->seglen) {
-                       BUG_ON(!cs->nr_segs);
-                       cs->seglen = cs->iov[0].iov_len;
-                       cs->addr = (unsigned long) cs->iov[0].iov_base;
-                       cs->iov++;
-                       cs->nr_segs--;
-               }
-               err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
+               size_t off;
+               err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
                if (err < 0)
                        return err;
-               BUG_ON(err != 1);
+               BUG_ON(!err);
+               cs->len = err;
+               cs->offset = off;
                cs->pg = page;
-               cs->offset = cs->addr % PAGE_SIZE;
-               cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
-               cs->seglen -= cs->len;
-               cs->addr += cs->len;
+               cs->offset = off;
+               iov_iter_advance(cs->iter, err);
        }
 
        return lock_request(cs->fc, cs->req);
@@ -1364,8 +1355,7 @@ static int fuse_dev_open(struct inode *inode, struct file *file)
        return 0;
 }
 
-static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t pos)
+static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
 {
        struct fuse_copy_state cs;
        struct file *file = iocb->ki_filp;
@@ -1373,9 +1363,12 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
        if (!fc)
                return -EPERM;
 
-       fuse_copy_init(&cs, fc, 1, iov, nr_segs);
+       if (!iter_is_iovec(to))
+               return -EINVAL;
 
-       return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
+       fuse_copy_init(&cs, fc, 1, to);
+
+       return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
 }
 
 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
@@ -1395,7 +1388,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
        if (!bufs)
                return -ENOMEM;
 
-       fuse_copy_init(&cs, fc, 1, NULL, 0);
+       fuse_copy_init(&cs, fc, 1, NULL);
        cs.pipebufs = bufs;
        cs.pipe = pipe;
        ret = fuse_dev_do_read(fc, in, &cs, len);
@@ -1971,17 +1964,19 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
        return err;
 }
 
-static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t pos)
+static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
 {
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
        if (!fc)
                return -EPERM;
 
-       fuse_copy_init(&cs, fc, 0, iov, nr_segs);
+       if (!iter_is_iovec(from))
+               return -EINVAL;
+
+       fuse_copy_init(&cs, fc, 0, from);
 
-       return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
+       return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
 }
 
 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
@@ -2044,8 +2039,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
        }
        pipe_unlock(pipe);
 
-       fuse_copy_init(&cs, fc, 0, NULL, nbuf);
+       fuse_copy_init(&cs, fc, 0, NULL);
        cs.pipebufs = bufs;
+       cs.nr_segs = nbuf;
        cs.pipe = pipe;
 
        if (flags & SPLICE_F_MOVE)
@@ -2233,11 +2229,9 @@ const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .open           = fuse_dev_open,
        .llseek         = no_llseek,
-       .read           = do_sync_read,
-       .aio_read       = fuse_dev_read,
+       .read_iter      = fuse_dev_read,
        .splice_read    = fuse_dev_splice_read,
-       .write          = do_sync_write,
-       .aio_write      = fuse_dev_write,
+       .write_iter     = fuse_dev_write,
        .splice_write   = fuse_dev_splice_write,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
index c01ec3bdcfd81090fae2cb26ae166f351d4505eb..8c15d0a077e83431fc63a02626f28aa6b1c29b23 100644 (file)
@@ -15,8 +15,8 @@
 #include <linux/module.h>
 #include <linux/compat.h>
 #include <linux/swap.h>
-#include <linux/aio.h>
 #include <linux/falloc.h>
+#include <linux/uio.h>
 
 static const struct file_operations fuse_direct_io_file_operations;
 
@@ -528,6 +528,17 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
        }
 }
 
+static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
+{
+       if (io->err)
+               return io->err;
+
+       if (io->bytes >= 0 && io->write)
+               return -EIO;
+
+       return io->bytes < 0 ? io->size : io->bytes;
+}
+
 /**
  * In case of short read, the caller sets 'pos' to the position of
  * actual end of fuse request in IO request. Otherwise, if bytes_requested
@@ -546,6 +557,7 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
  */
 static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
 {
+       bool is_sync = is_sync_kiocb(io->iocb);
        int left;
 
        spin_lock(&io->lock);
@@ -555,30 +567,24 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
                io->bytes = pos;
 
        left = --io->reqs;
+       if (!left && is_sync)
+               complete(io->done);
        spin_unlock(&io->lock);
 
-       if (!left) {
-               long res;
+       if (!left && !is_sync) {
+               ssize_t res = fuse_get_res_by_io(io);
 
-               if (io->err)
-                       res = io->err;
-               else if (io->bytes >= 0 && io->write)
-                       res = -EIO;
-               else {
-                       res = io->bytes < 0 ? io->size : io->bytes;
-
-                       if (!is_sync_kiocb(io->iocb)) {
-                               struct inode *inode = file_inode(io->iocb->ki_filp);
-                               struct fuse_conn *fc = get_fuse_conn(inode);
-                               struct fuse_inode *fi = get_fuse_inode(inode);
+               if (res >= 0) {
+                       struct inode *inode = file_inode(io->iocb->ki_filp);
+                       struct fuse_conn *fc = get_fuse_conn(inode);
+                       struct fuse_inode *fi = get_fuse_inode(inode);
 
-                               spin_lock(&fc->lock);
-                               fi->attr_version = ++fc->attr_version;
-                               spin_unlock(&fc->lock);
-                       }
+                       spin_lock(&fc->lock);
+                       fi->attr_version = ++fc->attr_version;
+                       spin_unlock(&fc->lock);
                }
 
-               aio_complete(io->iocb, res, 0);
+               io->iocb->ki_complete(io->iocb, res, 0);
                kfree(io);
        }
 }
@@ -1161,7 +1167,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);
 
-       err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+       err = generic_write_checks(file, &pos, &count);
        if (err)
                goto out;
 
@@ -1395,55 +1401,33 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
        return res;
 }
 
-static ssize_t fuse_direct_read(struct file *file, char __user *buf,
-                                    size_t count, loff_t *ppos)
+static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-       struct fuse_io_priv io = { .async = 0, .file = file };
-       struct iovec iov = { .iov_base = buf, .iov_len = count };
-       struct iov_iter ii;
-       iov_iter_init(&ii, READ, &iov, 1, count);
-       return __fuse_direct_read(&io, &ii, ppos);
-}
-
-static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
-                                  struct iov_iter *iter,
-                                  loff_t *ppos)
-{
-       struct file *file = io->file;
-       struct inode *inode = file_inode(file);
-       size_t count = iov_iter_count(iter);
-       ssize_t res;
-
-
-       res = generic_write_checks(file, ppos, &count, 0);
-       if (!res) {
-               iov_iter_truncate(iter, count);
-               res = fuse_direct_io(io, iter, ppos, FUSE_DIO_WRITE);
-       }
-
-       fuse_invalidate_attr(inode);
-
-       return res;
+       struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
+       return __fuse_direct_read(&io, to, &iocb->ki_pos);
 }
 
-static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
-                                size_t count, loff_t *ppos)
+static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
-       struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
+       struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
-       ssize_t res;
        struct fuse_io_priv io = { .async = 0, .file = file };
-       struct iov_iter ii;
-       iov_iter_init(&ii, WRITE, &iov, 1, count);
+       size_t count = iov_iter_count(from);
+       ssize_t res;
 
        if (is_bad_inode(inode))
                return -EIO;
 
        /* Don't allow parallel writes to the same file */
        mutex_lock(&inode->i_mutex);
-       res = __fuse_direct_write(&io, &ii, ppos);
+       res = generic_write_checks(file, &iocb->ki_pos, &count);
+       if (!res) {
+               iov_iter_truncate(from, count);
+               res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
+       }
+       fuse_invalidate_attr(inode);
        if (res > 0)
-               fuse_write_update_size(inode, *ppos);
+               fuse_write_update_size(inode, iocb->ki_pos);
        mutex_unlock(&inode->i_mutex);
 
        return res;
@@ -2798,9 +2782,9 @@ static inline loff_t fuse_round_up(loff_t off)
 }
 
 static ssize_t
-fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
-                       loff_t offset)
+fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
+       DECLARE_COMPLETION_ONSTACK(wait);
        ssize_t ret = 0;
        struct file *file = iocb->ki_filp;
        struct fuse_file *ff = file->private_data;
@@ -2815,15 +2799,15 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
        inode = file->f_mapping->host;
        i_size = i_size_read(inode);
 
-       if ((rw == READ) && (offset > i_size))
+       if ((iov_iter_rw(iter) == READ) && (offset > i_size))
                return 0;
 
        /* optimization for short read */
-       if (async_dio && rw != WRITE && offset + count > i_size) {
+       if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
                if (offset >= i_size)
                        return 0;
-               count = min_t(loff_t, count, fuse_round_up(i_size - offset));
-               iov_iter_truncate(iter, count);
+               iov_iter_truncate(iter, fuse_round_up(i_size - offset));
+               count = iov_iter_count(iter);
        }
 
        io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
@@ -2834,7 +2818,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
        io->bytes = -1;
        io->size = 0;
        io->offset = offset;
-       io->write = (rw == WRITE);
+       io->write = (iov_iter_rw(iter) == WRITE);
        io->err = 0;
        io->file = file;
        /*
@@ -2849,13 +2833,19 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
         * to wait on real async I/O requests, so we must submit this request
         * synchronously.
         */
-       if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
+       if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
+           iov_iter_rw(iter) == WRITE)
                io->async = false;
 
-       if (rw == WRITE)
-               ret = __fuse_direct_write(io, iter, &pos);
-       else
+       if (io->async && is_sync_kiocb(iocb))
+               io->done = &wait;
+
+       if (iov_iter_rw(iter) == WRITE) {
+               ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
+               fuse_invalidate_attr(inode);
+       } else {
                ret = __fuse_direct_read(io, iter, &pos);
+       }
 
        if (io->async) {
                fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
@@ -2864,12 +2854,13 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
                if (!is_sync_kiocb(iocb))
                        return -EIOCBQUEUED;
 
-               ret = wait_on_sync_kiocb(iocb);
-       } else {
-               kfree(io);
+               wait_for_completion(&wait);
+               ret = fuse_get_res_by_io(io);
        }
 
-       if (rw == WRITE) {
+       kfree(io);
+
+       if (iov_iter_rw(iter) == WRITE) {
                if (ret > 0)
                        fuse_write_update_size(inode, pos);
                else if (ret < 0 && offset + count > i_size)
@@ -2957,9 +2948,7 @@ out:
 
 static const struct file_operations fuse_file_operations = {
        .llseek         = fuse_file_llseek,
-       .read           = new_sync_read,
        .read_iter      = fuse_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = fuse_file_write_iter,
        .mmap           = fuse_file_mmap,
        .open           = fuse_open,
@@ -2977,8 +2966,8 @@ static const struct file_operations fuse_file_operations = {
 
 static const struct file_operations fuse_direct_io_file_operations = {
        .llseek         = fuse_file_llseek,
-       .read           = fuse_direct_read,
-       .write          = fuse_direct_write,
+       .read_iter      = fuse_direct_read_iter,
+       .write_iter     = fuse_direct_write_iter,
        .mmap           = fuse_direct_mmap,
        .open           = fuse_open,
        .flush          = fuse_flush,
index 1cdfb07c1376b4f4b5633e86fdbdfc4320953de2..7354dc142a50845a62e9a413d82d185afc1f5b0d 100644 (file)
@@ -263,6 +263,7 @@ struct fuse_io_priv {
        int err;
        struct kiocb *iocb;
        struct file *file;
+       struct completion *done;
 };
 
 /**
index 4ad4f94edebe25cc8afa3fa7c4ec35913cb00642..20dd33da92ded309ecfae28f68d2a419da914cb4 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/swap.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/backing-dev.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 #include <trace/events/writeback.h>
 
 #include "gfs2.h"
@@ -1016,13 +1016,12 @@ out:
 /**
  * gfs2_ok_for_dio - check that dio is valid on this file
  * @ip: The inode
- * @rw: READ or WRITE
  * @offset: The offset at which we are reading or writing
  *
  * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
  *          1 (to accept the i/o request)
  */
-static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
+static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
 {
        /*
         * Should we return an error here? I can't see that O_DIRECT for
@@ -1039,8 +1038,8 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
 
 
 
-static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
-                             struct iov_iter *iter, loff_t offset)
+static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -1061,7 +1060,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
        rv = gfs2_glock_nq(&gh);
        if (rv)
                return rv;
-       rv = gfs2_ok_for_dio(ip, rw, offset);
+       rv = gfs2_ok_for_dio(ip, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */
 
@@ -1091,13 +1090,12 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                rv = filemap_write_and_wait_range(mapping, lstart, end);
                if (rv)
                        goto out;
-               if (rw == WRITE)
+               if (iov_iter_rw(iter) == WRITE)
                        truncate_inode_pages_range(mapping, lstart, end);
        }
 
-       rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
-                                 iter, offset,
-                                 gfs2_get_block_direct, NULL, NULL, 0);
+       rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+                                 offset, gfs2_get_block_direct, NULL, NULL, 0);
 out:
        gfs2_glock_dq(&gh);
        gfs2_holder_uninit(&gh);
index 3e32bb8e2d7e573df59dc360a1ac38fe4ec759bd..614bb42cb7e1b765ce982ad61c0c51780e93247b 100644 (file)
@@ -25,7 +25,6 @@
 #include <asm/uaccess.h>
 #include <linux/dlm.h>
 #include <linux/dlm_plock.h>
-#include <linux/aio.h>
 #include <linux/delay.h>
 
 #include "gfs2.h"
@@ -1065,9 +1064,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 
 const struct file_operations gfs2_file_fops = {
        .llseek         = gfs2_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = gfs2_file_write_iter,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
@@ -1097,9 +1094,7 @@ const struct file_operations gfs2_dir_fops = {
 
 const struct file_operations gfs2_file_fops_nolock = {
        .llseek         = gfs2_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = gfs2_file_write_iter,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
index d0929bc817826e012cc829bb0f021832eea24379..75fd5d873c196dfd76006e77ef25405c9359c224 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/mpage.h>
 #include <linux/sched.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 
 #include "hfs_fs.h"
 #include "btree.h"
@@ -124,8 +124,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
        return res ? try_to_free_buffers(page) : 0;
 }
 
-static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
-               struct iov_iter *iter, loff_t offset)
+static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                            loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -133,13 +133,13 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, hfs_get_block);
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, hfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && ret < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
@@ -674,9 +674,7 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
 
 static const struct file_operations hfs_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
index 0cf786f2d046f9fbae9b110a2a2d212c008fb3aa..a43811f909353354ca9bebd3e5d13212d30ae505 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/mpage.h>
 #include <linux/sched.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
@@ -122,8 +122,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
        return res ? try_to_free_buffers(page) : 0;
 }
 
-static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
-               struct iov_iter *iter, loff_t offset)
+static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                                loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -131,14 +131,13 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, 
-                                hfsplus_get_block);
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, hfsplus_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && ret < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
@@ -341,9 +340,7 @@ static const struct inode_operations hfsplus_file_inode_operations = {
 
 static const struct file_operations hfsplus_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
index fd62cae0fdcb66db03712d419c25014312112546..e021188ca110aa9561be325c22724c75034d21f9 100644 (file)
@@ -378,11 +378,9 @@ static int hostfs_fsync(struct file *file, loff_t start, loff_t end,
 
 static const struct file_operations hostfs_file_fops = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
        .splice_read    = generic_file_splice_read,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
-       .write          = new_sync_write,
        .mmap           = generic_file_mmap,
        .open           = hostfs_file_open,
        .release        = hostfs_file_release,
index 7f54e5f76cececd4bf76354edb13eb246a81df78..6d8cfe9b52d611b7dadbfa1a38ec7cf3c3b3671c 100644 (file)
@@ -197,9 +197,7 @@ const struct address_space_operations hpfs_aops = {
 const struct file_operations hpfs_file_ops =
 {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .release        = hpfs_file_release,
index c274aca8e8dc231cb4473b964bc3315058910968..8c2dad629e7c094d5808a0335b30e8f0b3abf920 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/security.h>
 #include <linux/magic.h>
 #include <linux/migrate.h>
+#include <linux/uio.h>
 
 #include <asm/uaccess.h>
 
@@ -179,42 +180,33 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 }
 #endif
 
-static int
+static size_t
 hugetlbfs_read_actor(struct page *page, unsigned long offset,
-                       char __user *buf, unsigned long count,
-                       unsigned long size)
+                       struct iov_iter *to, unsigned long size)
 {
-       char *kaddr;
-       unsigned long left, copied = 0;
+       size_t copied = 0;
        int i, chunksize;
 
-       if (size > count)
-               size = count;
-
        /* Find which 4k chunk and offset with in that chunk */
        i = offset >> PAGE_CACHE_SHIFT;
        offset = offset & ~PAGE_CACHE_MASK;
 
        while (size) {
+               size_t n;
                chunksize = PAGE_CACHE_SIZE;
                if (offset)
                        chunksize -= offset;
                if (chunksize > size)
                        chunksize = size;
-               kaddr = kmap(&page[i]);
-               left = __copy_to_user(buf, kaddr + offset, chunksize);
-               kunmap(&page[i]);
-               if (left) {
-                       copied += (chunksize - left);
-                       break;
-               }
+               n = copy_page_to_iter(&page[i], offset, chunksize, to);
+               copied += n;
+               if (n != chunksize)
+                       return copied;
                offset = 0;
                size -= chunksize;
-               buf += chunksize;
-               copied += chunksize;
                i++;
        }
-       return copied ? copied : -EFAULT;
+       return copied;
 }
 
 /*
@@ -222,39 +214,34 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
  * data. Its *very* similar to do_generic_mapping_read(), we can't use that
  * since it has PAGE_CACHE_SIZE assumptions.
  */
-static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
-                             size_t len, loff_t *ppos)
+static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-       struct hstate *h = hstate_file(filp);
-       struct address_space *mapping = filp->f_mapping;
+       struct file *file = iocb->ki_filp;
+       struct hstate *h = hstate_file(file);
+       struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
-       unsigned long index = *ppos >> huge_page_shift(h);
-       unsigned long offset = *ppos & ~huge_page_mask(h);
+       unsigned long index = iocb->ki_pos >> huge_page_shift(h);
+       unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
        unsigned long end_index;
        loff_t isize;
        ssize_t retval = 0;
 
-       /* validate length */
-       if (len == 0)
-               goto out;
-
-       for (;;) {
+       while (iov_iter_count(to)) {
                struct page *page;
-               unsigned long nr, ret;
-               int ra;
+               size_t nr, copied;
 
                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
                isize = i_size_read(inode);
                if (!isize)
-                       goto out;
+                       break;
                end_index = (isize - 1) >> huge_page_shift(h);
-               if (index >= end_index) {
-                       if (index > end_index)
-                               goto out;
+               if (index > end_index)
+                       break;
+               if (index == end_index) {
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
                        if (nr <= offset)
-                               goto out;
+                               break;
                }
                nr = nr - offset;
 
@@ -265,39 +252,27 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
                         * We have a HOLE, zero out the user-buffer for the
                         * length of the hole or request.
                         */
-                       ret = len < nr ? len : nr;
-                       if (clear_user(buf, ret))
-                               ra = -EFAULT;
-                       else
-                               ra = 0;
+                       copied = iov_iter_zero(nr, to);
                } else {
                        unlock_page(page);
 
                        /*
                         * We have the page, copy it to user space buffer.
                         */
-                       ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
-                       ret = ra;
+                       copied = hugetlbfs_read_actor(page, offset, to, nr);
                        page_cache_release(page);
                }
-               if (ra < 0) {
-                       if (retval == 0)
-                               retval = ra;
-                       goto out;
+               offset += copied;
+               retval += copied;
+               if (copied != nr && iov_iter_count(to)) {
+                       if (!retval)
+                               retval = -EFAULT;
+                       break;
                }
-
-               offset += ret;
-               retval += ret;
-               len -= ret;
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);
-
-               /* short read or no more work */
-               if ((ret != nr) || (len == 0))
-                       break;
        }
-out:
-       *ppos = ((loff_t)index << huge_page_shift(h)) + offset;
+       iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
        return retval;
 }
 
@@ -721,7 +696,7 @@ static void init_once(void *foo)
 }
 
 const struct file_operations hugetlbfs_file_operations = {
-       .read                   = hugetlbfs_read,
+       .read_iter              = hugetlbfs_read_iter,
        .mmap                   = hugetlbfs_file_mmap,
        .fsync                  = noop_fsync,
        .get_unmapped_area      = hugetlb_get_unmapped_area,
index 64989ca9ba90b71e3a8d16ff90dda67a913cc988..f509f62e12f6ef85e95b5c159353cba757ef4af1 100644 (file)
@@ -51,9 +51,7 @@ const struct file_operations jffs2_file_operations =
 {
        .llseek =       generic_file_llseek,
        .open =         generic_file_open,
-       .read =         new_sync_read,
        .read_iter =    generic_file_read_iter,
-       .write =        new_sync_write,
        .write_iter =   generic_file_write_iter,
        .unlocked_ioctl=jffs2_ioctl,
        .mmap =         generic_file_readonly_mmap,
index 10815f8dfd8b28c31e045d62d0351013ca442a03..ae46788b97231efecb7eaf7d7c0a0b9c779349ef 100644 (file)
@@ -151,8 +151,6 @@ const struct inode_operations jfs_file_inode_operations = {
 const struct file_operations jfs_file_operations = {
        .open           = jfs_open,
        .llseek         = generic_file_llseek,
-       .write          = new_sync_write,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
index bd3df1ca3c9b7f955571c056f86f98e97beda7b9..070dc4b335449423091e67dd74c0f1c34617b041 100644 (file)
@@ -22,8 +22,8 @@
 #include <linux/buffer_head.h>
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
+#include <linux/uio.h>
 #include <linux/writeback.h>
-#include <linux/aio.h>
 #include "jfs_incore.h"
 #include "jfs_inode.h"
 #include "jfs_filsys.h"
@@ -330,8 +330,8 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, jfs_get_block);
 }
 
-static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
-       struct iov_iter *iter, loff_t offset)
+static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                            loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -339,13 +339,13 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, jfs_get_block);
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, jfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && ret < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
index 8538752df2f6a7dbb3dad0119e4e8bc47f4a25f4..b2c13f739ffa27b8b8a249cadcd025737a512ce6 100644 (file)
@@ -271,8 +271,6 @@ const struct file_operations logfs_reg_fops = {
        .llseek         = generic_file_llseek,
        .mmap           = generic_file_readonly_mmap,
        .open           = generic_file_open,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
 };
 
 const struct address_space_operations logfs_reg_aops = {
index a967de085ac0f4cf7193101cd4e54a08bb4fff50..6d63e27ec961c6ed03b9bbcd46414d4436e065dd 100644 (file)
@@ -14,9 +14,7 @@
  */
 const struct file_operations minix_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
index c83145af4bfc0ea9bb159002e3545e8a8cd65157..76fb76a0818bc274fc67b2d87b582db6690d62a6 100644 (file)
  * PATH_MAX includes the nul terminator --RR.
  */
 
-#define EMBEDDED_NAME_MAX      (PATH_MAX - sizeof(struct filename))
+#define EMBEDDED_NAME_MAX      (PATH_MAX - offsetof(struct filename, iname))
 
 struct filename *
 getname_flags(const char __user *filename, int flags, int *empty)
 {
-       struct filename *result, *err;
-       int len;
-       long max;
+       struct filename *result;
        char *kname;
+       int len;
 
        result = audit_reusename(filename);
        if (result)
@@ -136,22 +135,18 @@ getname_flags(const char __user *filename, int flags, int *empty)
        result = __getname();
        if (unlikely(!result))
                return ERR_PTR(-ENOMEM);
-       result->refcnt = 1;
 
        /*
         * First, try to embed the struct filename inside the names_cache
         * allocation
         */
-       kname = (char *)result + sizeof(*result);
+       kname = (char *)result->iname;
        result->name = kname;
-       result->separate = false;
-       max = EMBEDDED_NAME_MAX;
 
-recopy:
-       len = strncpy_from_user(kname, filename, max);
+       len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
        if (unlikely(len < 0)) {
-               err = ERR_PTR(len);
-               goto error;
+               __putname(result);
+               return ERR_PTR(len);
        }
 
        /*
@@ -160,43 +155,49 @@ recopy:
         * names_cache allocation for the pathname, and re-do the copy from
         * userland.
         */
-       if (len == EMBEDDED_NAME_MAX && max == EMBEDDED_NAME_MAX) {
+       if (unlikely(len == EMBEDDED_NAME_MAX)) {
+               const size_t size = offsetof(struct filename, iname[1]);
                kname = (char *)result;
 
-               result = kzalloc(sizeof(*result), GFP_KERNEL);
-               if (!result) {
-                       err = ERR_PTR(-ENOMEM);
-                       result = (struct filename *)kname;
-                       goto error;
+               /*
+                * size is chosen that way we to guarantee that
+                * result->iname[0] is within the same object and that
+                * kname can't be equal to result->iname, no matter what.
+                */
+               result = kzalloc(size, GFP_KERNEL);
+               if (unlikely(!result)) {
+                       __putname(kname);
+                       return ERR_PTR(-ENOMEM);
                }
                result->name = kname;
-               result->separate = true;
-               result->refcnt = 1;
-               max = PATH_MAX;
-               goto recopy;
+               len = strncpy_from_user(kname, filename, PATH_MAX);
+               if (unlikely(len < 0)) {
+                       __putname(kname);
+                       kfree(result);
+                       return ERR_PTR(len);
+               }
+               if (unlikely(len == PATH_MAX)) {
+                       __putname(kname);
+                       kfree(result);
+                       return ERR_PTR(-ENAMETOOLONG);
+               }
        }
 
+       result->refcnt = 1;
        /* The empty path is special. */
        if (unlikely(!len)) {
                if (empty)
                        *empty = 1;
-               err = ERR_PTR(-ENOENT);
-               if (!(flags & LOOKUP_EMPTY))
-                       goto error;
+               if (!(flags & LOOKUP_EMPTY)) {
+                       putname(result);
+                       return ERR_PTR(-ENOENT);
+               }
        }
 
-       err = ERR_PTR(-ENAMETOOLONG);
-       if (unlikely(len >= PATH_MAX))
-               goto error;
-
        result->uptr = filename;
        result->aname = NULL;
        audit_getname(result);
        return result;
-
-error:
-       putname(result);
-       return err;
 }
 
 struct filename *
@@ -216,8 +217,7 @@ getname_kernel(const char * filename)
                return ERR_PTR(-ENOMEM);
 
        if (len <= EMBEDDED_NAME_MAX) {
-               result->name = (char *)(result) + sizeof(*result);
-               result->separate = false;
+               result->name = (char *)result->iname;
        } else if (len <= PATH_MAX) {
                struct filename *tmp;
 
@@ -227,7 +227,6 @@ getname_kernel(const char * filename)
                        return ERR_PTR(-ENOMEM);
                }
                tmp->name = (char *)result;
-               tmp->separate = true;
                result = tmp;
        } else {
                __putname(result);
@@ -249,7 +248,7 @@ void putname(struct filename *name)
        if (--name->refcnt > 0)
                return;
 
-       if (name->separate) {
+       if (name->name != name->iname) {
                __putname(name->name);
                kfree(name);
        } else
@@ -1851,10 +1850,11 @@ static int link_path_walk(const char *name, struct nameidata *nd)
        return err;
 }
 
-static int path_init(int dfd, const char *name, unsigned int flags,
+static int path_init(int dfd, const struct filename *name, unsigned int flags,
                     struct nameidata *nd)
 {
        int retval = 0;
+       const char *s = name->name;
 
        nd->last_type = LAST_ROOT; /* if there are only slashes... */
        nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
@@ -1863,7 +1863,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        if (flags & LOOKUP_ROOT) {
                struct dentry *root = nd->root.dentry;
                struct inode *inode = root->d_inode;
-               if (*name) {
+               if (*s) {
                        if (!d_can_lookup(root))
                                return -ENOTDIR;
                        retval = inode_permission(inode, MAY_EXEC);
@@ -1885,7 +1885,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        nd->root.mnt = NULL;
 
        nd->m_seq = read_seqbegin(&mount_lock);
-       if (*name=='/') {
+       if (*s == '/') {
                if (flags & LOOKUP_RCU) {
                        rcu_read_lock();
                        nd->seq = set_root_rcu(nd);
@@ -1919,7 +1919,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
 
                dentry = f.file->f_path.dentry;
 
-               if (*name) {
+               if (*s) {
                        if (!d_can_lookup(dentry)) {
                                fdput(f);
                                return -ENOTDIR;
@@ -1949,7 +1949,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        return -ECHILD;
 done:
        current->total_link_count = 0;
-       return link_path_walk(name, nd);
+       return link_path_walk(s, nd);
 }
 
 static void path_cleanup(struct nameidata *nd)
@@ -1972,7 +1972,7 @@ static inline int lookup_last(struct nameidata *nd, struct path *path)
 }
 
 /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
-static int path_lookupat(int dfd, const char *name,
+static int path_lookupat(int dfd, const struct filename *name,
                                unsigned int flags, struct nameidata *nd)
 {
        struct path path;
@@ -2027,31 +2027,17 @@ static int path_lookupat(int dfd, const char *name,
 static int filename_lookup(int dfd, struct filename *name,
                                unsigned int flags, struct nameidata *nd)
 {
-       int retval = path_lookupat(dfd, name->name, flags | LOOKUP_RCU, nd);
+       int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
        if (unlikely(retval == -ECHILD))
-               retval = path_lookupat(dfd, name->name, flags, nd);
+               retval = path_lookupat(dfd, name, flags, nd);
        if (unlikely(retval == -ESTALE))
-               retval = path_lookupat(dfd, name->name,
-                                               flags | LOOKUP_REVAL, nd);
+               retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
 
        if (likely(!retval))
                audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
        return retval;
 }
 
-static int do_path_lookup(int dfd, const char *name,
-                               unsigned int flags, struct nameidata *nd)
-{
-       struct filename *filename = getname_kernel(name);
-       int retval = PTR_ERR(filename);
-
-       if (!IS_ERR(filename)) {
-               retval = filename_lookup(dfd, filename, flags, nd);
-               putname(filename);
-       }
-       return retval;
-}
-
 /* does lookup, returns the object with parent locked */
 struct dentry *kern_path_locked(const char *name, struct path *path)
 {
@@ -2089,9 +2075,15 @@ out:
 int kern_path(const char *name, unsigned int flags, struct path *path)
 {
        struct nameidata nd;
-       int res = do_path_lookup(AT_FDCWD, name, flags, &nd);
-       if (!res)
-               *path = nd.path;
+       struct filename *filename = getname_kernel(name);
+       int res = PTR_ERR(filename);
+
+       if (!IS_ERR(filename)) {
+               res = filename_lookup(AT_FDCWD, filename, flags, &nd);
+               putname(filename);
+               if (!res)
+                       *path = nd.path;
+       }
        return res;
 }
 EXPORT_SYMBOL(kern_path);
@@ -2108,15 +2100,22 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
                    const char *name, unsigned int flags,
                    struct path *path)
 {
-       struct nameidata nd;
-       int err;
-       nd.root.dentry = dentry;
-       nd.root.mnt = mnt;
+       struct filename *filename = getname_kernel(name);
+       int err = PTR_ERR(filename);
+
        BUG_ON(flags & LOOKUP_PARENT);
-       /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
-       err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd);
-       if (!err)
-               *path = nd.path;
+
+       /* the first argument of filename_lookup() is ignored with LOOKUP_ROOT */
+       if (!IS_ERR(filename)) {
+               struct nameidata nd;
+               nd.root.dentry = dentry;
+               nd.root.mnt = mnt;
+               err = filename_lookup(AT_FDCWD, filename,
+                                     flags | LOOKUP_ROOT, &nd);
+               if (!err)
+                       *path = nd.path;
+               putname(filename);
+       }
        return err;
 }
 EXPORT_SYMBOL(vfs_path_lookup);
@@ -2138,9 +2137,7 @@ static struct dentry *lookup_hash(struct nameidata *nd)
  * @len:       maximum length @len should be interpreted to
  *
  * Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.  Also note that by using this function the
- * nameidata argument is passed to the filesystem methods and a filesystem
- * using this helper needs to be prepared for that.
+ * not be called by generic code.
  */
 struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
 {
@@ -2341,7 +2338,8 @@ out:
  * Returns 0 and "path" will be valid on success; Returns error otherwise.
  */
 static int
-path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
+path_mountpoint(int dfd, const struct filename *name, struct path *path,
+               unsigned int flags)
 {
        struct nameidata nd;
        int err;
@@ -2370,20 +2368,20 @@ out:
 }
 
 static int
-filename_mountpoint(int dfd, struct filename *s, struct path *path,
+filename_mountpoint(int dfd, struct filename *name, struct path *path,
                        unsigned int flags)
 {
        int error;
-       if (IS_ERR(s))
-               return PTR_ERR(s);
-       error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU);
+       if (IS_ERR(name))
+               return PTR_ERR(name);
+       error = path_mountpoint(dfd, name, path, flags | LOOKUP_RCU);
        if (unlikely(error == -ECHILD))
-               error = path_mountpoint(dfd, s->name, path, flags);
+               error = path_mountpoint(dfd, name, path, flags);
        if (unlikely(error == -ESTALE))
-               error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_REVAL);
+               error = path_mountpoint(dfd, name, path, flags | LOOKUP_REVAL);
        if (likely(!error))
-               audit_inode(s, path->dentry, 0);
-       putname(s);
+               audit_inode(name, path->dentry, 0);
+       putname(name);
        return error;
 }
 
@@ -3156,7 +3154,7 @@ static int do_tmpfile(int dfd, struct filename *pathname,
        static const struct qstr name = QSTR_INIT("/", 1);
        struct dentry *dentry, *child;
        struct inode *dir;
-       int error = path_lookupat(dfd, pathname->name,
+       int error = path_lookupat(dfd, pathname,
                                  flags | LOOKUP_DIRECTORY, nd);
        if (unlikely(error))
                return error;
@@ -3229,7 +3227,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
                goto out;
        }
 
-       error = path_init(dfd, pathname->name, flags, nd);
+       error = path_init(dfd, pathname, flags, nd);
        if (unlikely(error))
                goto out;
 
index 1dd7007f974dd97fb0df6d036f85b1f0f9950842..ab6363b165565bd930e0edb84439eaf9b4cf9e5f 100644 (file)
@@ -98,30 +98,24 @@ out:
 }
 
 static ssize_t
-ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+ncp_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+       struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        size_t already_read = 0;
-       off_t pos;
+       off_t pos = iocb->ki_pos;
        size_t bufsize;
        int error;
-       voidfreepage;
+       void *freepage;
        size_t freelen;
 
        ncp_dbg(1, "enter %pD2\n", file);
 
-       pos = *ppos;
-
-       if ((ssize_t) count < 0) {
-               return -EINVAL;
-       }
-       if (!count)
+       if (!iov_iter_count(to))
                return 0;
        if (pos > inode->i_sb->s_maxbytes)
                return 0;
-       if (pos + count > inode->i_sb->s_maxbytes) {
-               count = inode->i_sb->s_maxbytes - pos;
-       }
+       iov_iter_truncate(to, inode->i_sb->s_maxbytes - pos);
 
        error = ncp_make_open(inode, O_RDONLY);
        if (error) {
@@ -138,31 +132,29 @@ ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                goto outrel;
        error = 0;
        /* First read in as much as possible for each bufsize. */
-       while (already_read < count) {
+       while (iov_iter_count(to)) {
                int read_this_time;
-               size_t to_read = min_t(unsigned int,
+               size_t to_read = min_t(size_t,
                                     bufsize - (pos % bufsize),
-                                    count - already_read);
+                                    iov_iter_count(to));
 
                error = ncp_read_bounce(NCP_SERVER(inode),
                                NCP_FINFO(inode)->file_handle,
-                               pos, to_read, buf, &read_this_time, 
+                               pos, to_read, to, &read_this_time, 
                                freepage, freelen);
                if (error) {
                        error = -EIO;   /* NW errno -> Linux errno */
                        break;
                }
                pos += read_this_time;
-               buf += read_this_time;
                already_read += read_this_time;
 
-               if (read_this_time != to_read) {
+               if (read_this_time != to_read)
                        break;
-               }
        }
        vfree(freepage);
 
-       *ppos = pos;
+       iocb->ki_pos = pos;
 
        file_accessed(file);
 
@@ -173,42 +165,26 @@ outrel:
 }
 
 static ssize_t
-ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        size_t already_written = 0;
-       off_t pos;
+       loff_t pos = iocb->ki_pos;
+       size_t count = iov_iter_count(from);
        size_t bufsize;
        int errno;
-       voidbouncebuffer;
+       void *bouncebuffer;
 
        ncp_dbg(1, "enter %pD2\n", file);
-       if ((ssize_t) count < 0)
-               return -EINVAL;
-       pos = *ppos;
-       if (file->f_flags & O_APPEND) {
-               pos = i_size_read(inode);
-       }
-
-       if (pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) {
-               if (pos >= MAX_NON_LFS) {
-                       return -EFBIG;
-               }
-               if (count > MAX_NON_LFS - (u32)pos) {
-                       count = MAX_NON_LFS - (u32)pos;
-               }
-       }
-       if (pos >= inode->i_sb->s_maxbytes) {
-               if (count || pos > inode->i_sb->s_maxbytes) {
-                       return -EFBIG;
-               }
-       }
-       if (pos + count > inode->i_sb->s_maxbytes) {
-               count = inode->i_sb->s_maxbytes - pos;
-       }
+       errno = generic_write_checks(file, &pos, &count);
+       if (errno)
+               return errno;
+       iov_iter_truncate(from, count);
        
        if (!count)
                return 0;
+
        errno = ncp_make_open(inode, O_WRONLY);
        if (errno) {
                ncp_dbg(1, "open failed, error=%d\n", errno);
@@ -216,8 +192,6 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
        }
        bufsize = NCP_SERVER(inode)->buffer_size;
 
-       already_written = 0;
-
        errno = file_update_time(file);
        if (errno)
                goto outrel;
@@ -227,13 +201,13 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
                errno = -EIO;   /* -ENOMEM */
                goto outrel;
        }
-       while (already_written < count) {
+       while (iov_iter_count(from)) {
                int written_this_time;
-               size_t to_write = min_t(unsigned int,
-                                     bufsize - (pos % bufsize),
-                                     count - already_written);
+               size_t to_write = min_t(size_t,
+                                     bufsize - ((off_t)pos % bufsize),
+                                     iov_iter_count(from));
 
-               if (copy_from_user(bouncebuffer, buf, to_write)) {
+               if (copy_from_iter(bouncebuffer, to_write, from) != to_write) {
                        errno = -EFAULT;
                        break;
                }
@@ -244,16 +218,14 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
                        break;
                }
                pos += written_this_time;
-               buf += written_this_time;
                already_written += written_this_time;
 
-               if (written_this_time != to_write) {
+               if (written_this_time != to_write)
                        break;
-               }
        }
        vfree(bouncebuffer);
 
-       *ppos = pos;
+       iocb->ki_pos = pos;
 
        if (pos > i_size_read(inode)) {
                mutex_lock(&inode->i_mutex);
@@ -277,8 +249,8 @@ static int ncp_release(struct inode *inode, struct file *file) {
 const struct file_operations ncp_file_operations =
 {
        .llseek         = generic_file_llseek,
-       .read           = ncp_file_read,
-       .write          = ncp_file_write,
+       .read_iter      = ncp_file_read_iter,
+       .write_iter     = ncp_file_write_iter,
        .unlocked_ioctl = ncp_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ncp_compat_ioctl,
index 482387532f547fbc0e61def87d85c1dfb4b71ebc..2b502a0d79413ef2c01069b8dd228da14ecc354c 100644 (file)
@@ -1001,8 +1001,8 @@ out:
  */
 int
 ncp_read_bounce(struct ncp_server *server, const char *file_id,
-        __u32 offset, __u16 to_read, char __user *target, int *bytes_read,
-        void* bounce, __u32 bufsize)
+        __u32 offset, __u16 to_read, struct iov_iter *to,
+        int *bytes_read, void *bounce, __u32 bufsize)
 {
        int result;
 
@@ -1025,7 +1025,7 @@ ncp_read_bounce(struct ncp_server *server, const char *file_id,
                                 (offset & 1);
                        *bytes_read = len;
                        result = 0;
-                       if (copy_to_user(target, source, len))
+                       if (copy_to_iter(source, len, to) != len)
                                result = -EFAULT;
                }
        }
index 250e443a07f32ec8188aa4f98db5bbeadcf59f19..5233fbc1747a5bf555da56c04917cc418c4390ed 100644 (file)
@@ -53,7 +53,7 @@ static inline int ncp_read_bounce_size(__u32 size) {
        return sizeof(struct ncp_reply_header) + 2 + 2 + size + 8;
 };
 int ncp_read_bounce(struct ncp_server *, const char *, __u32, __u16, 
-               char __user *, int *, void* bounce, __u32 bouncelen);
+               struct iov_iter *, int *, void *bounce, __u32 bouncelen);
 int ncp_read_kernel(struct ncp_server *, const char *, __u32, __u16, 
                char *, int *);
 int ncp_write_kernel(struct ncp_server *, const char *, __u32, __u16,
index e907c8cf732e3cff6bc9711ccf0b20c9261cdca2..5ddd77acb3f7434b74b733887e617bee346ff66b 100644 (file)
@@ -240,7 +240,6 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
 
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
- * @rw: direction (read or write)
  * @iocb: target I/O control block
  * @iov: array of vectors that define I/O buffer
  * @pos: offset in file to begin the operation
@@ -251,7 +250,7 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
  * shunt off direct read and write requests before the VFS gets them,
  * so this method is only ever called for swap.
  */
-ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
+ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct inode *inode = iocb->ki_filp->f_mapping->host;
 
@@ -265,9 +264,9 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t
 
        return -EINVAL;
 #else
-       VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
+       VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
 
-       if (rw == READ)
+       if (iov_iter_rw(iter) == READ)
                return nfs_file_direct_read(iocb, iter, pos);
        return nfs_file_direct_write(iocb, iter, pos);
 #endif /* CONFIG_NFS_SWAP */
@@ -393,7 +392,7 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
                long res = (long) dreq->error;
                if (!res)
                        res = (long) dreq->count;
-               aio_complete(dreq->iocb, res, 0);
+               dreq->iocb->ki_complete(dreq->iocb, res, 0);
        }
 
        complete_all(&dreq->completion);
@@ -978,7 +977,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
        dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
                file, count, (long long) pos);
 
-       result = generic_write_checks(file, &pos, &count, 0);
+       result = generic_write_checks(file, &pos, &count);
        if (result)
                goto out;
 
index e679d24c39d3a57d5ef510a22d5ccbe2832c5335..f6a3adedf0270b7edabb23511746a14bbe6d000c 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/nfs_mount.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
-#include <linux/aio.h>
 #include <linux/gfp.h>
 #include <linux/swap.h>
 
@@ -927,8 +926,6 @@ EXPORT_SYMBOL_GPL(nfs_flock);
 
 const struct file_operations nfs_file_operations = {
        .llseek         = nfs_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = nfs_file_read,
        .write_iter     = nfs_file_write,
        .mmap           = nfs_file_mmap,
index 8b46389c4c5b814806979b66348121afe0590090..0181cde1d102ab5340ef7be230dc2e3ca036993c 100644 (file)
@@ -170,8 +170,6 @@ const struct file_operations nfs4_file_operations = {
 #else
        .llseek         = nfs_file_llseek,
 #endif
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = nfs_file_read,
        .write_iter     = nfs_file_write,
        .mmap           = nfs_file_mmap,
index a8c728acb7a809b4078c21146ae1c3759c7f7021..54575e3cc1a24eda326fd19bde268bbc1b187f21 100644 (file)
@@ -143,8 +143,6 @@ static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
  */
 const struct file_operations nilfs_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = nilfs_ioctl,
index 8b5969538f39229cede14416a067d2e056c1a677..36f057fa8aa3be305ec0bb155a2924e8a9088957 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/mpage.h>
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 #include "nilfs.h"
 #include "btnode.h"
 #include "segment.h"
@@ -305,8 +305,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t
-nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
-               loff_t offset)
+nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -314,18 +313,17 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
        size_t count = iov_iter_count(iter);
        ssize_t size;
 
-       if (rw == WRITE)
+       if (iov_iter_rw(iter) == WRITE)
                return 0;
 
        /* Needs synchronization with the cleaner */
-       size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
-                                 nilfs_get_block);
+       size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && size < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && size < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
index 36ae529511c49140417cafe6559a167cd17d92e4..2ff263e6d363dba5f9621ad705c5795aaf855326 100644 (file)
@@ -8,7 +8,7 @@ ntfs-y := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
 
 ntfs-$(CONFIG_NTFS_RW) += bitmap.o lcnalloc.o logfile.o quota.o usnjrnl.o
 
-ccflags-y := -DNTFS_VERSION=\"2.1.31\"
+ccflags-y := -DNTFS_VERSION=\"2.1.32\"
 ccflags-$(CONFIG_NTFS_DEBUG)   += -DDEBUG
 ccflags-$(CONFIG_NTFS_RW)      += -DNTFS_RW
 
index 1da9b2d184dc4e32d9ac9a95eb0ee2553c5a1e46..cec4ec3c1ede2d343c6890c88eca19cd7e9babc8 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.
+ * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc.
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -28,7 +28,6 @@
 #include <linux/swap.h>
 #include <linux/uio.h>
 #include <linux/writeback.h>
-#include <linux/aio.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -329,62 +328,169 @@ err_out:
        return err;
 }
 
-/**
- * ntfs_fault_in_pages_readable -
- *
- * Fault a number of userspace pages into pagetables.
- *
- * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
- * with more than two userspace pages as well as handling the single page case
- * elegantly.
- *
- * If you find this difficult to understand, then think of the while loop being
- * the following code, except that we do without the integer variable ret:
- *
- *     do {
- *             ret = __get_user(c, uaddr);
- *             uaddr += PAGE_SIZE;
- *     } while (!ret && uaddr < end);
- *
- * Note, the final __get_user() may well run out-of-bounds of the user buffer,
- * but _not_ out-of-bounds of the page the user buffer belongs to, and since
- * this is only a read and not a write, and since it is still in the same page,
- * it should not matter and this makes the code much simpler.
- */
-static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
-               int bytes)
-{
-       const char __user *end;
-       volatile char c;
-
-       /* Set @end to the first byte outside the last page we care about. */
-       end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);
-
-       while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
-               ;
-}
-
-/**
- * ntfs_fault_in_pages_readable_iovec -
- *
- * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
- */
-static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
-               size_t iov_ofs, int bytes)
+static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb,
+               struct iov_iter *from)
 {
-       do {
-               const char __user *buf;
-               unsigned len;
+       loff_t pos;
+       s64 end, ll;
+       ssize_t err;
+       unsigned long flags;
+       struct file *file = iocb->ki_filp;
+       struct inode *vi = file_inode(file);
+       ntfs_inode *base_ni, *ni = NTFS_I(vi);
+       ntfs_volume *vol = ni->vol;
+       size_t count = iov_iter_count(from);
 
-               buf = iov->iov_base + iov_ofs;
-               len = iov->iov_len - iov_ofs;
-               if (len > bytes)
-                       len = bytes;
-               ntfs_fault_in_pages_readable(buf, len);
-               bytes -= len;
-               iov++;
-               iov_ofs = 0;
-       } while (bytes);
+       ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
+                       "0x%llx, count 0x%zx.", vi->i_ino,
+                       (unsigned)le32_to_cpu(ni->type),
+                       (unsigned long long)iocb->ki_pos, count);
+       err = generic_write_checks(file, &iocb->ki_pos, &count);
+       if (unlikely(err))
+               goto out;
+       iov_iter_truncate(from, count);
+       if (count == 0)
+               goto out;
+       /*
+        * All checks have passed.  Before we start doing any writing we want
+        * to abort any totally illegal writes.
+        */
+       BUG_ON(NInoMstProtected(ni));
+       BUG_ON(ni->type != AT_DATA);
+       /* If file is encrypted, deny access, just like NT4. */
+       if (NInoEncrypted(ni)) {
+               /* Only $DATA attributes can be encrypted. */
+               /*
+                * Reminder for later: Encrypted files are _always_
+                * non-resident so that the content can always be encrypted.
+                */
+               ntfs_debug("Denying write access to encrypted file.");
+               err = -EACCES;
+               goto out;
+       }
+       if (NInoCompressed(ni)) {
+               /* Only unnamed $DATA attribute can be compressed. */
+               BUG_ON(ni->name_len);
+               /*
+                * Reminder for later: If resident, the data is not actually
+                * compressed.  Only on the switch to non-resident does
+                * compression kick in.  This is in contrast to encrypted files
+                * (see above).
+                */
+               ntfs_error(vi->i_sb, "Writing to compressed files is not "
+                               "implemented yet.  Sorry.");
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+       base_ni = ni;
+       if (NInoAttr(ni))
+               base_ni = ni->ext.base_ntfs_ino;
+       err = file_remove_suid(file);
+       if (unlikely(err))
+               goto out;
+       /*
+        * Our ->update_time method always succeeds thus file_update_time()
+        * cannot fail either so there is no need to check the return code.
+        */
+       file_update_time(file);
+       pos = iocb->ki_pos;
+       /* The first byte after the last cluster being written to. */
+       end = (pos + iov_iter_count(from) + vol->cluster_size_mask) &
+                       ~(u64)vol->cluster_size_mask;
+       /*
+        * If the write goes beyond the allocated size, extend the allocation
+        * to cover the whole of the write, rounded up to the nearest cluster.
+        */
+       read_lock_irqsave(&ni->size_lock, flags);
+       ll = ni->allocated_size;
+       read_unlock_irqrestore(&ni->size_lock, flags);
+       if (end > ll) {
+               /*
+                * Extend the allocation without changing the data size.
+                *
+                * Note we ensure the allocation is big enough to at least
+                * write some data but we do not require the allocation to be
+                * complete, i.e. it may be partial.
+                */
+               ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
+               if (likely(ll >= 0)) {
+                       BUG_ON(pos >= ll);
+                       /* If the extension was partial truncate the write. */
+                       if (end > ll) {
+                               ntfs_debug("Truncating write to inode 0x%lx, "
+                                               "attribute type 0x%x, because "
+                                               "the allocation was only "
+                                               "partially extended.",
+                                               vi->i_ino, (unsigned)
+                                               le32_to_cpu(ni->type));
+                               iov_iter_truncate(from, ll - pos);
+                       }
+               } else {
+                       err = ll;
+                       read_lock_irqsave(&ni->size_lock, flags);
+                       ll = ni->allocated_size;
+                       read_unlock_irqrestore(&ni->size_lock, flags);
+                       /* Perform a partial write if possible or fail. */
+                       if (pos < ll) {
+                               ntfs_debug("Truncating write to inode 0x%lx "
+                                               "attribute type 0x%x, because "
+                                               "extending the allocation "
+                                               "failed (error %d).",
+                                               vi->i_ino, (unsigned)
+                                               le32_to_cpu(ni->type),
+                                               (int)-err);
+                               iov_iter_truncate(from, ll - pos);
+                       } else {
+                               if (err != -ENOSPC)
+                                       ntfs_error(vi->i_sb, "Cannot perform "
+                                                       "write to inode "
+                                                       "0x%lx, attribute "
+                                                       "type 0x%x, because "
+                                                       "extending the "
+                                                       "allocation failed "
+                                                       "(error %ld).",
+                                                       vi->i_ino, (unsigned)
+                                                       le32_to_cpu(ni->type),
+                                                       (long)-err);
+                               else
+                                       ntfs_debug("Cannot perform write to "
+                                                       "inode 0x%lx, "
+                                                       "attribute type 0x%x, "
+                                                       "because there is not "
+                                                       "space left.",
+                                                       vi->i_ino, (unsigned)
+                                                       le32_to_cpu(ni->type));
+                               goto out;
+                       }
+               }
+       }
+       /*
+        * If the write starts beyond the initialized size, extend it up to the
+        * beginning of the write and initialize all non-sparse space between
+        * the old initialized size and the new one.  This automatically also
+        * increments the vfs inode->i_size to keep it above or equal to the
+        * initialized_size.
+        */
+       read_lock_irqsave(&ni->size_lock, flags);
+       ll = ni->initialized_size;
+       read_unlock_irqrestore(&ni->size_lock, flags);
+       if (pos > ll) {
+               /*
+                * Wait for ongoing direct i/o to complete before proceeding.
+                * New direct i/o cannot start as we hold i_mutex.
+                */
+               inode_dio_wait(vi);
+               err = ntfs_attr_extend_initialized(ni, pos);
+               if (unlikely(err < 0))
+                       ntfs_error(vi->i_sb, "Cannot perform write to inode "
+                                       "0x%lx, attribute type 0x%x, because "
+                                       "extending the initialized size "
+                                       "failed (error %d).", vi->i_ino,
+                                       (unsigned)le32_to_cpu(ni->type),
+                                       (int)-err);
+       }
+out:
+       return err;
 }
 
 /**
@@ -421,8 +527,8 @@ static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
                                        goto err_out;
                                }
                        }
-                       err = add_to_page_cache_lru(*cached_page, mapping, index,
-                                       GFP_KERNEL);
+                       err = add_to_page_cache_lru(*cached_page, mapping,
+                                       index, GFP_KERNEL);
                        if (unlikely(err)) {
                                if (err == -EEXIST)
                                        continue;
@@ -1268,180 +1374,6 @@ rl_not_mapped_enoent:
        return err;
 }
 
-/*
- * Copy as much as we can into the pages and return the number of bytes which
- * were successfully copied.  If a fault is encountered then clear the pages
- * out to (ofs + bytes) and return the number of bytes which were copied.
- */
-static inline size_t ntfs_copy_from_user(struct page **pages,
-               unsigned nr_pages, unsigned ofs, const char __user *buf,
-               size_t bytes)
-{
-       struct page **last_page = pages + nr_pages;
-       char *addr;
-       size_t total = 0;
-       unsigned len;
-       int left;
-
-       do {
-               len = PAGE_CACHE_SIZE - ofs;
-               if (len > bytes)
-                       len = bytes;
-               addr = kmap_atomic(*pages);
-               left = __copy_from_user_inatomic(addr + ofs, buf, len);
-               kunmap_atomic(addr);
-               if (unlikely(left)) {
-                       /* Do it the slow way. */
-                       addr = kmap(*pages);
-                       left = __copy_from_user(addr + ofs, buf, len);
-                       kunmap(*pages);
-                       if (unlikely(left))
-                               goto err_out;
-               }
-               total += len;
-               bytes -= len;
-               if (!bytes)
-                       break;
-               buf += len;
-               ofs = 0;
-       } while (++pages < last_page);
-out:
-       return total;
-err_out:
-       total += len - left;
-       /* Zero the rest of the target like __copy_from_user(). */
-       while (++pages < last_page) {
-               bytes -= len;
-               if (!bytes)
-                       break;
-               len = PAGE_CACHE_SIZE;
-               if (len > bytes)
-                       len = bytes;
-               zero_user(*pages, 0, len);
-       }
-       goto out;
-}
-
-static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
-               const struct iovec *iov, size_t iov_ofs, size_t bytes)
-{
-       size_t total = 0;
-
-       while (1) {
-               const char __user *buf = iov->iov_base + iov_ofs;
-               unsigned len;
-               size_t left;
-
-               len = iov->iov_len - iov_ofs;
-               if (len > bytes)
-                       len = bytes;
-               left = __copy_from_user_inatomic(vaddr, buf, len);
-               total += len;
-               bytes -= len;
-               vaddr += len;
-               if (unlikely(left)) {
-                       total -= left;
-                       break;
-               }
-               if (!bytes)
-                       break;
-               iov++;
-               iov_ofs = 0;
-       }
-       return total;
-}
-
-static inline void ntfs_set_next_iovec(const struct iovec **iovp,
-               size_t *iov_ofsp, size_t bytes)
-{
-       const struct iovec *iov = *iovp;
-       size_t iov_ofs = *iov_ofsp;
-
-       while (bytes) {
-               unsigned len;
-
-               len = iov->iov_len - iov_ofs;
-               if (len > bytes)
-                       len = bytes;
-               bytes -= len;
-               iov_ofs += len;
-               if (iov->iov_len == iov_ofs) {
-                       iov++;
-                       iov_ofs = 0;
-               }
-       }
-       *iovp = iov;
-       *iov_ofsp = iov_ofs;
-}
-
-/*
- * This has the same side-effects and return value as ntfs_copy_from_user().
- * The difference is that on a fault we need to memset the remainder of the
- * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
- * single-segment behaviour.
- *
- * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
- * atomic and when not atomic.  This is ok because it calls
- * __copy_from_user_inatomic() and it is ok to call this when non-atomic.  In
- * fact, the only difference between __copy_from_user_inatomic() and
- * __copy_from_user() is that the latter calls might_sleep() and the former
- * should not zero the tail of the buffer on error.  And on many architectures
- * __copy_from_user_inatomic() is just defined to __copy_from_user() so it
- * makes no difference at all on those architectures.
- */
-static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
-               unsigned nr_pages, unsigned ofs, const struct iovec **iov,
-               size_t *iov_ofs, size_t bytes)
-{
-       struct page **last_page = pages + nr_pages;
-       char *addr;
-       size_t copied, len, total = 0;
-
-       do {
-               len = PAGE_CACHE_SIZE - ofs;
-               if (len > bytes)
-                       len = bytes;
-               addr = kmap_atomic(*pages);
-               copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
-                               *iov, *iov_ofs, len);
-               kunmap_atomic(addr);
-               if (unlikely(copied != len)) {
-                       /* Do it the slow way. */
-                       addr = kmap(*pages);
-                       copied = __ntfs_copy_from_user_iovec_inatomic(addr +
-                                       ofs, *iov, *iov_ofs, len);
-                       if (unlikely(copied != len))
-                               goto err_out;
-                       kunmap(*pages);
-               }
-               total += len;
-               ntfs_set_next_iovec(iov, iov_ofs, len);
-               bytes -= len;
-               if (!bytes)
-                       break;
-               ofs = 0;
-       } while (++pages < last_page);
-out:
-       return total;
-err_out:
-       BUG_ON(copied > len);
-       /* Zero the rest of the target like __copy_from_user(). */
-       memset(addr + ofs + copied, 0, len - copied);
-       kunmap(*pages);
-       total += copied;
-       ntfs_set_next_iovec(iov, iov_ofs, copied);
-       while (++pages < last_page) {
-               bytes -= len;
-               if (!bytes)
-                       break;
-               len = PAGE_CACHE_SIZE;
-               if (len > bytes)
-                       len = bytes;
-               zero_user(*pages, 0, len);
-       }
-       goto out;
-}
-
 static inline void ntfs_flush_dcache_pages(struct page **pages,
                unsigned nr_pages)
 {
@@ -1762,86 +1694,83 @@ err_out:
        return err;
 }
 
-static void ntfs_write_failed(struct address_space *mapping, loff_t to)
+/*
+ * Copy as much as we can into the pages and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then clear the pages
+ * out to (ofs + bytes) and return the number of bytes which were copied.
+ */
+static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
+               unsigned ofs, struct iov_iter *i, size_t bytes)
 {
-       struct inode *inode = mapping->host;
+       struct page **last_page = pages + nr_pages;
+       size_t total = 0;
+       struct iov_iter data = *i;
+       unsigned len, copied;
 
-       if (to > inode->i_size) {
-               truncate_pagecache(inode, inode->i_size);
-               ntfs_truncate_vfs(inode);
-       }
+       do {
+               len = PAGE_CACHE_SIZE - ofs;
+               if (len > bytes)
+                       len = bytes;
+               copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
+                               len);
+               total += copied;
+               bytes -= copied;
+               if (!bytes)
+                       break;
+               iov_iter_advance(&data, copied);
+               if (copied < len)
+                       goto err;
+               ofs = 0;
+       } while (++pages < last_page);
+out:
+       return total;
+err:
+       /* Zero the rest of the target like __copy_from_user(). */
+       len = PAGE_CACHE_SIZE - copied;
+       do {
+               if (len > bytes)
+                       len = bytes;
+               zero_user(*pages, copied, len);
+               bytes -= len;
+               copied = 0;
+               len = PAGE_CACHE_SIZE;
+       } while (++pages < last_page);
+       goto out;
 }
 
 /**
- * ntfs_file_buffered_write -
- *
- * Locking: The vfs is holding ->i_mutex on the inode.
+ * ntfs_perform_write - perform buffered write to a file
+ * @file:      file to write to
+ * @i:         iov_iter with data to write
+ * @pos:       byte offset in file at which to begin writing to
  */
-static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
-               const struct iovec *iov, unsigned long nr_segs,
-               loff_t pos, loff_t *ppos, size_t count)
+static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
+               loff_t pos)
 {
-       struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *vi = mapping->host;
        ntfs_inode *ni = NTFS_I(vi);
        ntfs_volume *vol = ni->vol;
        struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
        struct page *cached_page = NULL;
-       char __user *buf = NULL;
-       s64 end, ll;
        VCN last_vcn;
        LCN lcn;
-       unsigned long flags;
-       size_t bytes, iov_ofs = 0;      /* Offset in the current iovec. */
-       ssize_t status, written;
+       size_t bytes;
+       ssize_t status, written = 0;
        unsigned nr_pages;
-       int err;
 
-       ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
-                       "pos 0x%llx, count 0x%lx.",
-                       vi->i_ino, (unsigned)le32_to_cpu(ni->type),
-                       (unsigned long long)pos, (unsigned long)count);
-       if (unlikely(!count))
-               return 0;
-       BUG_ON(NInoMstProtected(ni));
-       /*
-        * If the attribute is not an index root and it is encrypted or
-        * compressed, we cannot write to it yet.  Note we need to check for
-        * AT_INDEX_ALLOCATION since this is the type of both directory and
-        * index inodes.
-        */
-       if (ni->type != AT_INDEX_ALLOCATION) {
-               /* If file is encrypted, deny access, just like NT4. */
-               if (NInoEncrypted(ni)) {
-                       /*
-                        * Reminder for later: Encrypted files are _always_
-                        * non-resident so that the content can always be
-                        * encrypted.
-                        */
-                       ntfs_debug("Denying write access to encrypted file.");
-                       return -EACCES;
-               }
-               if (NInoCompressed(ni)) {
-                       /* Only unnamed $DATA attribute can be compressed. */
-                       BUG_ON(ni->type != AT_DATA);
-                       BUG_ON(ni->name_len);
-                       /*
-                        * Reminder for later: If resident, the data is not
-                        * actually compressed.  Only on the switch to non-
-                        * resident does compression kick in.  This is in
-                        * contrast to encrypted files (see above).
-                        */
-                       ntfs_error(vi->i_sb, "Writing to compressed files is "
-                                       "not implemented yet.  Sorry.");
-                       return -EOPNOTSUPP;
-               }
-       }
+       ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
+                       "0x%llx, count 0x%lx.", vi->i_ino,
+                       (unsigned)le32_to_cpu(ni->type),
+                       (unsigned long long)pos,
+                       (unsigned long)iov_iter_count(i));
        /*
         * If a previous ntfs_truncate() failed, repeat it and abort if it
         * fails again.
         */
        if (unlikely(NInoTruncateFailed(ni))) {
+               int err;
+
                inode_dio_wait(vi);
                err = ntfs_truncate(vi);
                if (err || NInoTruncateFailed(ni)) {
@@ -1855,81 +1784,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                        return err;
                }
        }
-       /* The first byte after the write. */
-       end = pos + count;
-       /*
-        * If the write goes beyond the allocated size, extend the allocation
-        * to cover the whole of the write, rounded up to the nearest cluster.
-        */
-       read_lock_irqsave(&ni->size_lock, flags);
-       ll = ni->allocated_size;
-       read_unlock_irqrestore(&ni->size_lock, flags);
-       if (end > ll) {
-               /* Extend the allocation without changing the data size. */
-               ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
-               if (likely(ll >= 0)) {
-                       BUG_ON(pos >= ll);
-                       /* If the extension was partial truncate the write. */
-                       if (end > ll) {
-                               ntfs_debug("Truncating write to inode 0x%lx, "
-                                               "attribute type 0x%x, because "
-                                               "the allocation was only "
-                                               "partially extended.",
-                                               vi->i_ino, (unsigned)
-                                               le32_to_cpu(ni->type));
-                               end = ll;
-                               count = ll - pos;
-                       }
-               } else {
-                       err = ll;
-                       read_lock_irqsave(&ni->size_lock, flags);
-                       ll = ni->allocated_size;
-                       read_unlock_irqrestore(&ni->size_lock, flags);
-                       /* Perform a partial write if possible or fail. */
-                       if (pos < ll) {
-                               ntfs_debug("Truncating write to inode 0x%lx, "
-                                               "attribute type 0x%x, because "
-                                               "extending the allocation "
-                                               "failed (error code %i).",
-                                               vi->i_ino, (unsigned)
-                                               le32_to_cpu(ni->type), err);
-                               end = ll;
-                               count = ll - pos;
-                       } else {
-                               ntfs_error(vol->sb, "Cannot perform write to "
-                                               "inode 0x%lx, attribute type "
-                                               "0x%x, because extending the "
-                                               "allocation failed (error "
-                                               "code %i).", vi->i_ino,
-                                               (unsigned)
-                                               le32_to_cpu(ni->type), err);
-                               return err;
-                       }
-               }
-       }
-       written = 0;
-       /*
-        * If the write starts beyond the initialized size, extend it up to the
-        * beginning of the write and initialize all non-sparse space between
-        * the old initialized size and the new one.  This automatically also
-        * increments the vfs inode->i_size to keep it above or equal to the
-        * initialized_size.
-        */
-       read_lock_irqsave(&ni->size_lock, flags);
-       ll = ni->initialized_size;
-       read_unlock_irqrestore(&ni->size_lock, flags);
-       if (pos > ll) {
-               err = ntfs_attr_extend_initialized(ni, pos);
-               if (err < 0) {
-                       ntfs_error(vol->sb, "Cannot perform write to inode "
-                                       "0x%lx, attribute type 0x%x, because "
-                                       "extending the initialized size "
-                                       "failed (error code %i).", vi->i_ino,
-                                       (unsigned)le32_to_cpu(ni->type), err);
-                       status = err;
-                       goto err_out;
-               }
-       }
        /*
         * Determine the number of pages per cluster for non-resident
         * attributes.
@@ -1937,10 +1791,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
        nr_pages = 1;
        if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
                nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
-       /* Finally, perform the actual write. */
        last_vcn = -1;
-       if (likely(nr_segs == 1))
-               buf = iov->iov_base;
        do {
                VCN vcn;
                pgoff_t idx, start_idx;
@@ -1965,10 +1816,10 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                                                vol->cluster_size_bits, false);
                                up_read(&ni->runlist.lock);
                                if (unlikely(lcn < LCN_HOLE)) {
-                                       status = -EIO;
                                        if (lcn == LCN_ENOMEM)
                                                status = -ENOMEM;
-                                       else
+                                       else {
+                                               status = -EIO;
                                                ntfs_error(vol->sb, "Cannot "
                                                        "perform write to "
                                                        "inode 0x%lx, "
@@ -1977,6 +1828,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                                                        "is corrupt.",
                                                        vi->i_ino, (unsigned)
                                                        le32_to_cpu(ni->type));
+                                       }
                                        break;
                                }
                                if (lcn == LCN_HOLE) {
@@ -1989,8 +1841,9 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                                }
                        }
                }
-               if (bytes > count)
-                       bytes = count;
+               if (bytes > iov_iter_count(i))
+                       bytes = iov_iter_count(i);
+again:
                /*
                 * Bring in the user page(s) that we will copy from _first_.
                 * Otherwise there is a nasty deadlock on copying from the same
@@ -1999,10 +1852,10 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                 * pages being swapped out between us bringing them into memory
                 * and doing the actual copying.
                 */
-               if (likely(nr_segs == 1))
-                       ntfs_fault_in_pages_readable(buf, bytes);
-               else
-                       ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
+               if (unlikely(iov_iter_fault_in_multipages_readable(i, bytes))) {
+                       status = -EFAULT;
+                       break;
+               }
                /* Get and lock @do_pages starting at index @start_idx. */
                status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
                                pages, &cached_page);
@@ -2018,56 +1871,57 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                        status = ntfs_prepare_pages_for_non_resident_write(
                                        pages, do_pages, pos, bytes);
                        if (unlikely(status)) {
-                               loff_t i_size;
-
                                do {
                                        unlock_page(pages[--do_pages]);
                                        page_cache_release(pages[do_pages]);
                                } while (do_pages);
-                               /*
-                                * The write preparation may have instantiated
-                                * allocated space outside i_size.  Trim this
-                                * off again.  We can ignore any errors in this
-                                * case as we will just be waisting a bit of
-                                * allocated space, which is not a disaster.
-                                */
-                               i_size = i_size_read(vi);
-                               if (pos + bytes > i_size) {
-                                       ntfs_write_failed(mapping, pos + bytes);
-                               }
                                break;
                        }
                }
                u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
-               if (likely(nr_segs == 1)) {
-                       copied = ntfs_copy_from_user(pages + u, do_pages - u,
-                                       ofs, buf, bytes);
-                       buf += copied;
-               } else
-                       copied = ntfs_copy_from_user_iovec(pages + u,
-                                       do_pages - u, ofs, &iov, &iov_ofs,
-                                       bytes);
+               copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
+                                       i, bytes);
                ntfs_flush_dcache_pages(pages + u, do_pages - u);
-               status = ntfs_commit_pages_after_write(pages, do_pages, pos,
-                               bytes);
-               if (likely(!status)) {
-                       written += copied;
-                       count -= copied;
-                       pos += copied;
-                       if (unlikely(copied != bytes))
-                               status = -EFAULT;
+               status = 0;
+               if (likely(copied == bytes)) {
+                       status = ntfs_commit_pages_after_write(pages, do_pages,
+                                       pos, bytes);
+                       if (!status)
+                               status = bytes;
                }
                do {
                        unlock_page(pages[--do_pages]);
                        page_cache_release(pages[do_pages]);
                } while (do_pages);
-               if (unlikely(status))
+               if (unlikely(status < 0))
                        break;
-               balance_dirty_pages_ratelimited(mapping);
+               copied = status;
                cond_resched();
-       } while (count);
-err_out:
-       *ppos = pos;
+               if (unlikely(!copied)) {
+                       size_t sc;
+
+                       /*
+                        * We failed to copy anything.  Fall back to single
+                        * segment length write.
+                        *
+                        * This is needed to avoid possible livelock in the
+                        * case that all segments in the iov cannot be copied
+                        * at once without a pagefault.
+                        */
+                       sc = iov_iter_single_seg_count(i);
+                       if (bytes > sc)
+                               bytes = sc;
+                       goto again;
+               }
+               iov_iter_advance(i, copied);
+               pos += copied;
+               written += copied;
+               balance_dirty_pages_ratelimited(mapping);
+               if (fatal_signal_pending(current)) {
+                       status = -EINTR;
+                       break;
+               }
+       } while (iov_iter_count(i));
        if (cached_page)
                page_cache_release(cached_page);
        ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
@@ -2077,63 +1931,36 @@ err_out:
 }
 
 /**
- * ntfs_file_aio_write_nolock -
+ * ntfs_file_write_iter - simple wrapper for ntfs_file_write_iter_nolock()
+ * @iocb:      IO state structure
+ * @from:      iov_iter with data to write
+ *
+ * Basically the same as generic_file_write_iter() except that it ends up
+ * up calling ntfs_perform_write() instead of generic_perform_write() and that
+ * O_DIRECT is not implemented.
  */
-static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
-               const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
+static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
-       loff_t pos;
-       size_t count;           /* after file limit checks */
-       ssize_t written, err;
+       struct inode *vi = file_inode(file);
+       ssize_t written = 0;
+       ssize_t err;
 
-       count = iov_length(iov, nr_segs);
-       pos = *ppos;
+       mutex_lock(&vi->i_mutex);
        /* We can write back this queue in page reclaim. */
-       current->backing_dev_info = inode_to_bdi(inode);
-       written = 0;
-       err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-       if (err)
-               goto out;
-       if (!count)
-               goto out;
-       err = file_remove_suid(file);
-       if (err)
-               goto out;
-       err = file_update_time(file);
-       if (err)
-               goto out;
-       written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
-                       count);
-out:
+       current->backing_dev_info = inode_to_bdi(vi);
+       err = ntfs_prepare_file_for_write(iocb, from);
+       if (iov_iter_count(from) && !err)
+               written = ntfs_perform_write(file, from, iocb->ki_pos);
        current->backing_dev_info = NULL;
-       return written ? written : err;
-}
-
-/**
- * ntfs_file_aio_write -
- */
-static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos)
-{
-       struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
-       ssize_t ret;
-
-       BUG_ON(iocb->ki_pos != pos);
-
-       mutex_lock(&inode->i_mutex);
-       ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
-       mutex_unlock(&inode->i_mutex);
-       if (ret > 0) {
-               int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+       mutex_unlock(&vi->i_mutex);
+       if (likely(written > 0)) {
+               err = generic_write_sync(file, iocb->ki_pos, written);
                if (err < 0)
-                       ret = err;
+                       written = 0;
        }
-       return ret;
+       iocb->ki_pos += written;
+       return written ? written : err;
 }
 
 /**
@@ -2197,37 +2024,15 @@ static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
 #endif /* NTFS_RW */
 
 const struct file_operations ntfs_file_ops = {
-       .llseek         = generic_file_llseek,   /* Seek inside file. */
-       .read           = new_sync_read,         /* Read from file. */
-       .read_iter      = generic_file_read_iter, /* Async read from file. */
+       .llseek         = generic_file_llseek,
+       .read_iter      = generic_file_read_iter,
 #ifdef NTFS_RW
-       .write          = do_sync_write,         /* Write to file. */
-       .aio_write      = ntfs_file_aio_write,   /* Async write to file. */
-       /*.release      = ,*/                    /* Last file is closed.  See
-                                                   fs/ext2/file.c::
-                                                   ext2_release_file() for
-                                                   how to use this to discard
-                                                   preallocated space for
-                                                   write opened files. */
-       .fsync          = ntfs_file_fsync,       /* Sync a file to disk. */
-       /*.aio_fsync    = ,*/                    /* Sync all outstanding async
-                                                   i/o operations on a
-                                                   kiocb. */
+       .write_iter     = ntfs_file_write_iter,
+       .fsync          = ntfs_file_fsync,
 #endif /* NTFS_RW */
-       /*.ioctl        = ,*/                    /* Perform function on the
-                                                   mounted filesystem. */
-       .mmap           = generic_file_mmap,     /* Mmap file. */
-       .open           = ntfs_file_open,        /* Open file. */
-       .splice_read    = generic_file_splice_read /* Zero-copy data send with
-                                                   the data source being on
-                                                   the ntfs partition.  We do
-                                                   not need to care about the
-                                                   data destination. */
-       /*.sendpage     = ,*/                    /* Zero-copy data send with
-                                                   the data destination being
-                                                   on the ntfs partition.  We
-                                                   do not need to care about
-                                                   the data source. */
+       .mmap           = generic_file_mmap,
+       .open           = ntfs_file_open,
+       .splice_read    = generic_file_splice_read,
 };
 
 const struct inode_operations ntfs_file_inode_ops = {
index 898b9949d36357a8b7998600f3fdbacaa498d08f..1d0c21df0d805cd73248afd42dc05c1108c49700 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/quotaops.h>
 #include <linux/slab.h>
 #include <linux/log2.h>
-#include <linux/aio.h>
 
 #include "aops.h"
 #include "attrib.h"
index 44db1808cdb598df6b91548410b3634480c06c31..28b5ad81bbec7b4686b3e6475570fe7393a3820a 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mpage.h>
 #include <linux/quotaops.h>
 #include <linux/blkdev.h>
+#include <linux/uio.h>
 
 #include <cluster/masklog.h>
 
@@ -737,10 +738,9 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
                di_bh = NULL;
        }
 
-       written = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev,
-                       iter, offset,
-                       ocfs2_direct_IO_get_blocks,
-                       ocfs2_dio_end_io, NULL, 0);
+       written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+                                      offset, ocfs2_direct_IO_get_blocks,
+                                      ocfs2_dio_end_io, NULL, 0);
        if (unlikely(written < 0)) {
                loff_t i_size = i_size_read(inode);
 
@@ -818,9 +818,7 @@ out:
        return ret;
 }
 
-static ssize_t ocfs2_direct_IO(int rw,
-                              struct kiocb *iocb,
-                              struct iov_iter *iter,
+static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                               loff_t offset)
 {
        struct file *file = iocb->ki_filp;
@@ -842,12 +840,11 @@ static ssize_t ocfs2_direct_IO(int rw,
        if (i_size_read(inode) <= offset && !full_coherency)
                return 0;
 
-       if (rw == READ)
-               return __blockdev_direct_IO(rw, iocb, inode,
-                                   inode->i_sb->s_bdev,
-                                   iter, offset,
-                                   ocfs2_direct_IO_get_blocks,
-                                   ocfs2_dio_end_io, NULL, 0);
+       if (iov_iter_rw(iter) == READ)
+               return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
+                                           iter, offset,
+                                           ocfs2_direct_IO_get_blocks,
+                                           ocfs2_dio_end_io, NULL, 0);
        else
                return ocfs2_direct_IO_write(iocb, iter, offset);
 }
index 6cae155d54df0d68be4f90f4754d15c30302159c..dd59599b022d5ab26dffd82807d048cac170a154 100644 (file)
@@ -22,7 +22,7 @@
 #ifndef OCFS2_AOPS_H
 #define OCFS2_AOPS_H
 
-#include <linux/aio.h>
+#include <linux/fs.h>
 
 handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
                                                         struct page *page,
index ba1790e52ff2364bd027454650ceef6a9ba227b9..8096fb6c081b9328ff20e95f4655d1c096f73fd8 100644 (file)
@@ -2280,7 +2280,7 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
                file->f_path.dentry->d_name.name,
                (unsigned int)from->nr_segs);   /* GRRRRR */
 
-       if (iocb->ki_nbytes == 0)
+       if (count == 0)
                return 0;
 
        appending = file->f_flags & O_APPEND ? 1 : 0;
@@ -2330,8 +2330,7 @@ relock:
        }
 
        can_do_direct = direct_io;
-       ret = ocfs2_prepare_inode_for_write(file, ppos,
-                                           iocb->ki_nbytes, appending,
+       ret = ocfs2_prepare_inode_for_write(file, ppos, count, appending,
                                            &can_do_direct, &has_refcount);
        if (ret < 0) {
                mlog_errno(ret);
@@ -2339,8 +2338,7 @@ relock:
        }
 
        if (direct_io && !is_sync_kiocb(iocb))
-               unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_nbytes,
-                                                     *ppos);
+               unaligned_dio = ocfs2_is_io_unaligned(inode, count, *ppos);
 
        /*
         * We can't complete the direct I/O as requested, fall back to
@@ -2376,8 +2374,7 @@ relock:
        /* communicate with ocfs2_dio_end_io */
        ocfs2_iocb_set_rw_locked(iocb, rw_level);
 
-       ret = generic_write_checks(file, ppos, &count,
-                                  S_ISBLK(inode->i_mode));
+       ret = generic_write_checks(file, ppos, &count);
        if (ret)
                goto out_dio;
 
@@ -2683,8 +2680,6 @@ const struct inode_operations ocfs2_special_file_iops = {
  */
 const struct file_operations ocfs2_fops = {
        .llseek         = ocfs2_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .mmap           = ocfs2_mmap,
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_file_release,
@@ -2731,8 +2726,6 @@ const struct file_operations ocfs2_dops = {
  */
 const struct file_operations ocfs2_fops_no_plocks = {
        .llseek         = ocfs2_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .mmap           = ocfs2_mmap,
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_file_release,
index 902e88527fcec443244bd12a9a25e1cf895ba763..f993be7f2156fcd863fcc28261add1326efd0146 100644 (file)
@@ -337,8 +337,6 @@ static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
 
 const struct file_operations omfs_file_operations = {
        .llseek = generic_file_llseek,
-       .read = new_sync_read,
-       .write = new_sync_write,
        .read_iter = generic_file_read_iter,
        .write_iter = generic_file_write_iter,
        .mmap = generic_file_mmap,
index 33f9cbf2610b39498d416cb8c142fb5ebe4cc790..6796f04d6032ab7e3009d3666bde640fdec37934 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -570,6 +570,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
        uid = make_kuid(current_user_ns(), user);
        gid = make_kgid(current_user_ns(), group);
 
+retry_deleg:
        newattrs.ia_valid =  ATTR_CTIME;
        if (user != (uid_t) -1) {
                if (!uid_valid(uid))
@@ -586,7 +587,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
        if (!S_ISDIR(inode->i_mode))
                newattrs.ia_valid |=
                        ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
-retry_deleg:
        mutex_lock(&inode->i_mutex);
        error = security_path_chown(path, uid, gid);
        if (!error)
@@ -734,10 +734,10 @@ static int do_dentry_open(struct file *f,
        if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_inc(inode);
        if ((f->f_mode & FMODE_READ) &&
-            likely(f->f_op->read || f->f_op->aio_read || f->f_op->read_iter))
+            likely(f->f_op->read || f->f_op->read_iter))
                f->f_mode |= FMODE_CAN_READ;
        if ((f->f_mode & FMODE_WRITE) &&
-            likely(f->f_op->write || f->f_op->aio_write || f->f_op->write_iter))
+            likely(f->f_op->write || f->f_op->write_iter))
                f->f_mode |= FMODE_CAN_WRITE;
 
        f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
@@ -988,9 +988,6 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
                return ERR_PTR(err);
        if (flags & O_CREAT)
                return ERR_PTR(-EINVAL);
-       if (!filename && (flags & O_DIRECTORY))
-               if (!dentry->d_inode->i_op->lookup)
-                       return ERR_PTR(-ENOTDIR);
        return do_file_open_root(dentry, mnt, filename, &op);
 }
 EXPORT_SYMBOL(file_open_root);
index 21981e58e2a634c09b9ebb9b327860d849fb6b53..822da5b7cff06610bec61297ad2a3a688faf11b6 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -21,7 +21,6 @@
 #include <linux/audit.h>
 #include <linux/syscalls.h>
 #include <linux/fcntl.h>
-#include <linux/aio.h>
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
@@ -947,9 +946,7 @@ err:
 const struct file_operations pipefifo_fops = {
        .open           = fifo_open,
        .llseek         = no_llseek,
-       .read           = new_sync_read,
        .read_iter      = pipe_read,
-       .write          = new_sync_write,
        .write_iter     = pipe_write,
        .poll           = pipe_poll,
        .unlocked_ioctl = pipe_ioctl,
index 4f56de822d2f5995b81006e0bc9783d321b1621d..183a212694bf8af77ee3f682137709ebdb38a274 100644 (file)
@@ -31,9 +31,7 @@
 #include "internal.h"
 
 const struct file_operations ramfs_file_operations = {
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = noop_fsync,
index f6ab41b39612bd4cd04bd49921095443e54150e3..0b38befa69f356b03b34175f9f629feaf4c8d0fa 100644 (file)
@@ -44,9 +44,7 @@ const struct file_operations ramfs_file_operations = {
        .mmap_capabilities      = ramfs_mmap_capabilities,
        .mmap                   = ramfs_nommu_mmap,
        .get_unmapped_area      = ramfs_nommu_get_unmapped_area,
-       .read                   = new_sync_read,
        .read_iter              = generic_file_read_iter,
-       .write                  = new_sync_write,
        .write_iter             = generic_file_write_iter,
        .fsync                  = noop_fsync,
        .splice_read            = generic_file_splice_read,
index 8e1b68786d663d4be5551efcd7b0bf7d5ed8b192..819ef3faf1bb710678175de06a13f4dcf6e90d62 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/fcntl.h>
 #include <linux/file.h>
 #include <linux/uio.h>
-#include <linux/aio.h>
 #include <linux/fsnotify.h>
 #include <linux/security.h>
 #include <linux/export.h>
 #include <asm/unistd.h>
 
 typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
-typedef ssize_t (*iov_fn_t)(struct kiocb *, const struct iovec *,
-               unsigned long, loff_t);
 typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *);
 
 const struct file_operations generic_ro_fops = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
        .mmap           = generic_file_readonly_mmap,
        .splice_read    = generic_file_splice_read,
@@ -343,13 +339,10 @@ ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos)
 
        init_sync_kiocb(&kiocb, file);
        kiocb.ki_pos = *ppos;
-       kiocb.ki_nbytes = iov_iter_count(iter);
 
        iter->type |= READ;
        ret = file->f_op->read_iter(&kiocb, iter);
-       if (ret == -EIOCBQUEUED)
-               ret = wait_on_sync_kiocb(&kiocb);
-
+       BUG_ON(ret == -EIOCBQUEUED);
        if (ret > 0)
                *ppos = kiocb.ki_pos;
        return ret;
@@ -366,13 +359,10 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos)
 
        init_sync_kiocb(&kiocb, file);
        kiocb.ki_pos = *ppos;
-       kiocb.ki_nbytes = iov_iter_count(iter);
 
        iter->type |= WRITE;
        ret = file->f_op->write_iter(&kiocb, iter);
-       if (ret == -EIOCBQUEUED)
-               ret = wait_on_sync_kiocb(&kiocb);
-
+       BUG_ON(ret == -EIOCBQUEUED);
        if (ret > 0)
                *ppos = kiocb.ki_pos;
        return ret;
@@ -418,26 +408,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
        return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
 }
 
-ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
-{
-       struct iovec iov = { .iov_base = buf, .iov_len = len };
-       struct kiocb kiocb;
-       ssize_t ret;
-
-       init_sync_kiocb(&kiocb, filp);
-       kiocb.ki_pos = *ppos;
-       kiocb.ki_nbytes = len;
-
-       ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
-       if (-EIOCBQUEUED == ret)
-               ret = wait_on_sync_kiocb(&kiocb);
-       *ppos = kiocb.ki_pos;
-       return ret;
-}
-
-EXPORT_SYMBOL(do_sync_read);
-
-ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
+static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct kiocb kiocb;
@@ -446,34 +417,25 @@ ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *p
 
        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
-       kiocb.ki_nbytes = len;
        iov_iter_init(&iter, READ, &iov, 1, len);
 
        ret = filp->f_op->read_iter(&kiocb, &iter);
-       if (-EIOCBQUEUED == ret)
-               ret = wait_on_sync_kiocb(&kiocb);
+       BUG_ON(ret == -EIOCBQUEUED);
        *ppos = kiocb.ki_pos;
        return ret;
 }
 
-EXPORT_SYMBOL(new_sync_read);
-
 ssize_t __vfs_read(struct file *file, char __user *buf, size_t count,
                   loff_t *pos)
 {
-       ssize_t ret;
-
        if (file->f_op->read)
-               ret = file->f_op->read(file, buf, count, pos);
-       else if (file->f_op->aio_read)
-               ret = do_sync_read(file, buf, count, pos);
+               return file->f_op->read(file, buf, count, pos);
        else if (file->f_op->read_iter)
-               ret = new_sync_read(file, buf, count, pos);
+               return new_sync_read(file, buf, count, pos);
        else
-               ret = -EINVAL;
-
-       return ret;
+               return -EINVAL;
 }
+EXPORT_SYMBOL(__vfs_read);
 
 ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 {
@@ -502,26 +464,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 
 EXPORT_SYMBOL(vfs_read);
 
-ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
-{
-       struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
-       struct kiocb kiocb;
-       ssize_t ret;
-
-       init_sync_kiocb(&kiocb, filp);
-       kiocb.ki_pos = *ppos;
-       kiocb.ki_nbytes = len;
-
-       ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
-       if (-EIOCBQUEUED == ret)
-               ret = wait_on_sync_kiocb(&kiocb);
-       *ppos = kiocb.ki_pos;
-       return ret;
-}
-
-EXPORT_SYMBOL(do_sync_write);
-
-ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
+static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
        struct kiocb kiocb;
@@ -530,17 +473,26 @@ ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, lo
 
        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
-       kiocb.ki_nbytes = len;
        iov_iter_init(&iter, WRITE, &iov, 1, len);
 
        ret = filp->f_op->write_iter(&kiocb, &iter);
-       if (-EIOCBQUEUED == ret)
-               ret = wait_on_sync_kiocb(&kiocb);
-       *ppos = kiocb.ki_pos;
+       BUG_ON(ret == -EIOCBQUEUED);
+       if (ret > 0)
+               *ppos = kiocb.ki_pos;
        return ret;
 }
 
-EXPORT_SYMBOL(new_sync_write);
+ssize_t __vfs_write(struct file *file, const char __user *p, size_t count,
+                   loff_t *pos)
+{
+       if (file->f_op->write)
+               return file->f_op->write(file, p, count, pos);
+       else if (file->f_op->write_iter)
+               return new_sync_write(file, p, count, pos);
+       else
+               return -EINVAL;
+}
+EXPORT_SYMBOL(__vfs_write);
 
 ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
 {
@@ -556,12 +508,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
        p = (__force const char __user *)buf;
        if (count > MAX_RW_COUNT)
                count =  MAX_RW_COUNT;
-       if (file->f_op->write)
-               ret = file->f_op->write(file, p, count, pos);
-       else if (file->f_op->aio_write)
-               ret = do_sync_write(file, p, count, pos);
-       else
-               ret = new_sync_write(file, p, count, pos);
+       ret = __vfs_write(file, p, count, pos);
        set_fs(old_fs);
        if (ret > 0) {
                fsnotify_modify(file);
@@ -588,12 +535,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
        if (ret >= 0) {
                count = ret;
                file_start_write(file);
-               if (file->f_op->write)
-                       ret = file->f_op->write(file, buf, count, pos);
-               else if (file->f_op->aio_write)
-                       ret = do_sync_write(file, buf, count, pos);
-               else
-                       ret = new_sync_write(file, buf, count, pos);
+               ret = __vfs_write(file, buf, count, pos);
                if (ret > 0) {
                        fsnotify_modify(file);
                        add_wchar(current, ret);
@@ -710,60 +652,32 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
 }
 EXPORT_SYMBOL(iov_shorten);
 
-static ssize_t do_iter_readv_writev(struct file *filp, int rw, const struct iovec *iov,
-               unsigned long nr_segs, size_t len, loff_t *ppos, iter_fn_t fn)
-{
-       struct kiocb kiocb;
-       struct iov_iter iter;
-       ssize_t ret;
-
-       init_sync_kiocb(&kiocb, filp);
-       kiocb.ki_pos = *ppos;
-       kiocb.ki_nbytes = len;
-
-       iov_iter_init(&iter, rw, iov, nr_segs, len);
-       ret = fn(&kiocb, &iter);
-       if (ret == -EIOCBQUEUED)
-               ret = wait_on_sync_kiocb(&kiocb);
-       *ppos = kiocb.ki_pos;
-       return ret;
-}
-
-static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
-               unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
+static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
+               loff_t *ppos, iter_fn_t fn)
 {
        struct kiocb kiocb;
        ssize_t ret;
 
        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
-       kiocb.ki_nbytes = len;
 
-       ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
-       if (ret == -EIOCBQUEUED)
-               ret = wait_on_sync_kiocb(&kiocb);
+       ret = fn(&kiocb, iter);
+       BUG_ON(ret == -EIOCBQUEUED);
        *ppos = kiocb.ki_pos;
        return ret;
 }
 
 /* Do it by hand, with file-ops */
-static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
-               unsigned long nr_segs, loff_t *ppos, io_fn_t fn)
+static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
+               loff_t *ppos, io_fn_t fn)
 {
-       struct iovec *vector = iov;
        ssize_t ret = 0;
 
-       while (nr_segs > 0) {
-               void __user *base;
-               size_t len;
+       while (iov_iter_count(iter)) {
+               struct iovec iovec = iov_iter_iovec(iter);
                ssize_t nr;
 
-               base = vector->iov_base;
-               len = vector->iov_len;
-               vector++;
-               nr_segs--;
-
-               nr = fn(filp, base, len, ppos);
+               nr = fn(filp, iovec.iov_base, iovec.iov_len, ppos);
 
                if (nr < 0) {
                        if (!ret)
@@ -771,8 +685,9 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
                        break;
                }
                ret += nr;
-               if (nr != len)
+               if (nr != iovec.iov_len)
                        break;
+               iov_iter_advance(iter, nr);
        }
 
        return ret;
@@ -863,48 +778,42 @@ static ssize_t do_readv_writev(int type, struct file *file,
        size_t tot_len;
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
+       struct iov_iter iter;
        ssize_t ret;
        io_fn_t fn;
-       iov_fn_t fnv;
        iter_fn_t iter_fn;
 
-       ret = rw_copy_check_uvector(type, uvector, nr_segs,
-                                   ARRAY_SIZE(iovstack), iovstack, &iov);
-       if (ret <= 0)
-               goto out;
+       ret = import_iovec(type, uvector, nr_segs,
+                          ARRAY_SIZE(iovstack), &iov, &iter);
+       if (ret < 0)
+               return ret;
 
-       tot_len = ret;
+       tot_len = iov_iter_count(&iter);
+       if (!tot_len)
+               goto out;
        ret = rw_verify_area(type, file, pos, tot_len);
        if (ret < 0)
                goto out;
 
-       fnv = NULL;
        if (type == READ) {
                fn = file->f_op->read;
-               fnv = file->f_op->aio_read;
                iter_fn = file->f_op->read_iter;
        } else {
                fn = (io_fn_t)file->f_op->write;
-               fnv = file->f_op->aio_write;
                iter_fn = file->f_op->write_iter;
                file_start_write(file);
        }
 
        if (iter_fn)
-               ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
-                                               pos, iter_fn);
-       else if (fnv)
-               ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
-                                               pos, fnv);
+               ret = do_iter_readv_writev(file, &iter, pos, iter_fn);
        else
-               ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);
+               ret = do_loop_readv_writev(file, &iter, pos, fn);
 
        if (type != READ)
                file_end_write(file);
 
 out:
-       if (iov != iovstack)
-               kfree(iov);
+       kfree(iov);
        if ((ret + (type == READ)) > 0) {
                if (type == READ)
                        fsnotify_access(file);
@@ -1043,48 +952,42 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
        compat_ssize_t tot_len;
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
+       struct iov_iter iter;
        ssize_t ret;
        io_fn_t fn;
-       iov_fn_t fnv;
        iter_fn_t iter_fn;
 
-       ret = compat_rw_copy_check_uvector(type, uvector, nr_segs,
-                                              UIO_FASTIOV, iovstack, &iov);
-       if (ret <= 0)
-               goto out;
+       ret = compat_import_iovec(type, uvector, nr_segs,
+                                 UIO_FASTIOV, &iov, &iter);
+       if (ret < 0)
+               return ret;
 
-       tot_len = ret;
+       tot_len = iov_iter_count(&iter);
+       if (!tot_len)
+               goto out;
        ret = rw_verify_area(type, file, pos, tot_len);
        if (ret < 0)
                goto out;
 
-       fnv = NULL;
        if (type == READ) {
                fn = file->f_op->read;
-               fnv = file->f_op->aio_read;
                iter_fn = file->f_op->read_iter;
        } else {
                fn = (io_fn_t)file->f_op->write;
-               fnv = file->f_op->aio_write;
                iter_fn = file->f_op->write_iter;
                file_start_write(file);
        }
 
        if (iter_fn)
-               ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
-                                               pos, iter_fn);
-       else if (fnv)
-               ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
-                                               pos, fnv);
+               ret = do_iter_readv_writev(file, &iter, pos, iter_fn);
        else
-               ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);
+               ret = do_loop_readv_writev(file, &iter, pos, fn);
 
        if (type != READ)
                file_end_write(file);
 
 out:
-       if (iov != iovstack)
-               kfree(iov);
+       kfree(iov);
        if ((ret + (type == READ)) > 0) {
                if (type == READ)
                        fsnotify_access(file);
index 751dd3f4346b5dab98d7b4861cd24a617c148bae..96a1bcf33db4435098baa153f807a71274543aaf 100644 (file)
@@ -243,8 +243,6 @@ drop_write_lock:
 }
 
 const struct file_operations reiserfs_file_operations = {
-       .read = new_sync_read,
-       .write = new_sync_write,
        .unlocked_ioctl = reiserfs_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = reiserfs_compat_ioctl,
index e72401e1f9956238064c91805279233a721bffe1..742242b60972671c50926f9a0dd0db48c4e7aeca 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/writeback.h>
 #include <linux/quotaops.h>
 #include <linux/swap.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 
 int reiserfs_commit_write(struct file *f, struct page *page,
                          unsigned from, unsigned to);
@@ -3278,22 +3278,22 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
  * We thank Mingming Cao for helping us understand in great detail what
  * to do in this section of the code.
  */
-static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
-                                 struct iov_iter *iter, loff_t offset)
+static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                                 loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
+       ret = blockdev_direct_IO(iocb, inode, iter, offset,
                                 reiserfs_get_blocks_direct_io);
 
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && ret < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
index 7da9e2153953eb31399bf3909a9ef881328ddf90..1118a0dc6b45f0292cfdf34970d35c6dfc2e6ba8 100644 (file)
@@ -81,7 +81,6 @@ static unsigned romfs_mmap_capabilities(struct file *file)
 
 const struct file_operations romfs_ro_fops = {
        .llseek                 = generic_file_llseek,
-       .read                   = new_sync_read,
        .read_iter              = generic_file_read_iter,
        .splice_read            = generic_file_splice_read,
        .mmap                   = romfs_mmap,
index 7968da96bebbb5d1cd087cbfa2ece65c09cc8b4a..41cbb16299e0949984eb284887c22f77fff0390f 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/gfp.h>
 #include <linux/socket.h>
 #include <linux/compat.h>
-#include <linux/aio.h>
 #include "internal.h"
 
 /*
@@ -1534,34 +1533,29 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
-       ssize_t count;
 
        pipe = get_pipe_info(file);
        if (!pipe)
                return -EBADF;
 
-       ret = rw_copy_check_uvector(READ, uiov, nr_segs,
-                                   ARRAY_SIZE(iovstack), iovstack, &iov);
-       if (ret <= 0)
-               goto out;
-
-       count = ret;
-       iov_iter_init(&iter, READ, iov, nr_segs, count);
+       ret = import_iovec(READ, uiov, nr_segs,
+                          ARRAY_SIZE(iovstack), &iov, &iter);
+       if (ret < 0)
+               return ret;
 
+       sd.total_len = iov_iter_count(&iter);
        sd.len = 0;
-       sd.total_len = count;
        sd.flags = flags;
        sd.u.data = &iter;
        sd.pos = 0;
 
-       pipe_lock(pipe);
-       ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
-       pipe_unlock(pipe);
-
-out:
-       if (iov != iovstack)
-               kfree(iov);
+       if (sd.total_len) {
+               pipe_lock(pipe);
+               ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
+               pipe_unlock(pipe);
+       }
 
+       kfree(iov);
        return ret;
 }
 
index ae0c3cef9927e64fb1f21ccf1848155825fc79ef..19636af5e75cc16614f790519c6111599d906830 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -66,7 +66,7 @@ int vfs_getattr(struct path *path, struct kstat *stat)
 {
        int retval;
 
-       retval = security_inode_getattr(path->mnt, path->dentry);
+       retval = security_inode_getattr(path);
        if (retval)
                return retval;
        return vfs_getattr_nosec(path, stat);
index b00811c75b24f63acb1651991f6972f4b0b6ff54..a48e30410ad1979eaf1068cd59ddfa2dcc89c69a 100644 (file)
@@ -21,9 +21,7 @@
  */
 const struct file_operations sysv_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
index e627c0acf6264f6aabc4b2777ab79214e9c32e64..475b15635f11654f40ddbc23dfc0f76aeb2a1099 100644 (file)
@@ -50,7 +50,6 @@
  */
 
 #include "ubifs.h"
-#include <linux/aio.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/slab.h>
@@ -1581,8 +1580,6 @@ const struct inode_operations ubifs_symlink_inode_operations = {
 
 const struct file_operations ubifs_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ubifs_write_iter,
        .mmap           = ubifs_file_mmap,
index 08f3555fbeac3f6ceeda033cb8f6ec82557623d0..ccab8b78e3633ea9167afed946a124adc3a106ee 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/errno.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
@@ -100,8 +100,7 @@ static int udf_adinicb_write_begin(struct file *file,
        return 0;
 }
 
-static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
-                                    struct iov_iter *iter,
+static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                                     loff_t offset)
 {
        /* Fallback to buffered I/O. */
@@ -121,21 +120,27 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        ssize_t retval;
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
-       int err, pos;
-       size_t count = iocb->ki_nbytes;
+       size_t count = iov_iter_count(from);
        struct udf_inode_info *iinfo = UDF_I(inode);
+       int err;
 
        mutex_lock(&inode->i_mutex);
+
+       retval = generic_write_checks(file, &iocb->ki_pos, &count);
+       if (retval)
+               goto out;
+
+       if (count == 0)
+               goto out;
+
+       iov_iter_truncate(from, count);
+
        down_write(&iinfo->i_data_sem);
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
-               if (file->f_flags & O_APPEND)
-                       pos = inode->i_size;
-               else
-                       pos = iocb->ki_pos;
+               loff_t end = iocb->ki_pos + iov_iter_count(from);
 
                if (inode->i_sb->s_blocksize <
-                               (udf_file_entry_alloc_offset(inode) +
-                                               pos + count)) {
+                               (udf_file_entry_alloc_offset(inode) + end)) {
                        err = udf_expand_file_adinicb(inode);
                        if (err) {
                                mutex_unlock(&inode->i_mutex);
@@ -143,16 +148,14 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                                return err;
                        }
                } else {
-                       if (pos + count > inode->i_size)
-                               iinfo->i_lenAlloc = pos + count;
-                       else
-                               iinfo->i_lenAlloc = inode->i_size;
+                       iinfo->i_lenAlloc = max(end, inode->i_size);
                        up_write(&iinfo->i_data_sem);
                }
        } else
                up_write(&iinfo->i_data_sem);
 
        retval = __generic_file_write_iter(iocb, from);
+out:
        mutex_unlock(&inode->i_mutex);
 
        if (retval > 0) {
@@ -240,12 +243,10 @@ static int udf_release_file(struct inode *inode, struct file *filp)
 }
 
 const struct file_operations udf_file_operations = {
-       .read                   = new_sync_read,
        .read_iter              = generic_file_read_iter,
        .unlocked_ioctl         = udf_ioctl,
        .open                   = generic_file_open,
        .mmap                   = generic_file_mmap,
-       .write                  = new_sync_write,
        .write_iter             = udf_file_write_iter,
        .release                = udf_release_file,
        .fsync                  = generic_file_fsync,
index a445d599098d7ad1ccace2a81a86a0bc563af391..4f178c83b04f17edf98a9fbbc582a2bb42a39a39 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/slab.h>
 #include <linux/crc-itu-t.h>
 #include <linux/mpage.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
@@ -215,8 +215,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
        return ret;
 }
 
-static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
-                            struct iov_iter *iter,
+static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
@@ -225,8 +224,8 @@ static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
-       if (unlikely(ret < 0 && (rw & WRITE)))
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, udf_get_block);
+       if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
                udf_write_failed(mapping, offset + count);
        return ret;
 }
index c84ec010a6761ff683e732973bd8780d989e6531..042ddbf110ccf65254c82bcb834c63916c941d81 100644 (file)
@@ -35,9 +35,7 @@
  
 const struct file_operations ufs_file_operations = {
        .llseek         = generic_file_llseek,
-       .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
-       .write          = new_sync_write,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = generic_file_open,
index 3a9b7a1b8704be66439ea797dd2183c035a929e5..1d8eef9cf0f509ac91f86f97ae7fe608e44bc849 100644 (file)
@@ -31,7 +31,6 @@
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_bmap_btree.h"
-#include <linux/aio.h>
 #include <linux/gfp.h>
 #include <linux/mpage.h>
 #include <linux/pagevec.h>
@@ -1496,7 +1495,6 @@ xfs_end_io_direct_write(
 
 STATIC ssize_t
 xfs_vm_direct_IO(
-       int                     rw,
        struct kiocb            *iocb,
        struct iov_iter         *iter,
        loff_t                  offset)
@@ -1504,15 +1502,14 @@ xfs_vm_direct_IO(
        struct inode            *inode = iocb->ki_filp->f_mapping->host;
        struct block_device     *bdev = xfs_find_bdev_for_inode(inode);
 
-       if (rw & WRITE) {
-               return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
-                                           offset, xfs_get_blocks_direct,
+       if (iov_iter_rw(iter) == WRITE) {
+               return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+                                           xfs_get_blocks_direct,
                                            xfs_end_io_direct_write, NULL,
                                            DIO_ASYNC_EXTEND);
        }
-       return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
-                                   offset, xfs_get_blocks_direct,
-                                   NULL, NULL, 0);
+       return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+                                   xfs_get_blocks_direct, NULL, NULL, 0);
 }
 
 /*
index a2e1cb8a568bf9d45e32c43539a2e6f8b56d83f4..ebde43e15dd9402bfa58f035ebde222d727a3e57 100644 (file)
@@ -38,7 +38,6 @@
 #include "xfs_icache.h"
 #include "xfs_pnfs.h"
 
-#include <linux/aio.h>
 #include <linux/dcache.h>
 #include <linux/falloc.h>
 #include <linux/pagevec.h>
@@ -545,17 +544,18 @@ xfs_zero_eof(
  */
 STATIC ssize_t
 xfs_file_aio_write_checks(
-       struct file             *file,
-       loff_t                  *pos,
-       size_t                  *count,
+       struct kiocb            *iocb,
+       struct iov_iter         *from,
        int                     *iolock)
 {
+       struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        int                     error = 0;
+       size_t                  count = iov_iter_count(from);
 
 restart:
-       error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
+       error = generic_write_checks(file, &iocb->ki_pos, &count);
        if (error)
                return error;
 
@@ -570,7 +570,7 @@ restart:
         * iolock shared, we need to update it to exclusive which implies
         * having to redo all checks before.
         */
-       if (*pos > i_size_read(inode)) {
+       if (iocb->ki_pos > i_size_read(inode)) {
                bool    zero = false;
 
                if (*iolock == XFS_IOLOCK_SHARED) {
@@ -579,10 +579,11 @@ restart:
                        xfs_rw_ilock(ip, *iolock);
                        goto restart;
                }
-               error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
+               error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
                if (error)
                        return error;
        }
+       iov_iter_truncate(from, count);
 
        /*
         * Updating the timestamps will grab the ilock again from
@@ -679,10 +680,11 @@ xfs_file_dio_aio_write(
                xfs_rw_ilock(ip, iolock);
        }
 
-       ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
+       ret = xfs_file_aio_write_checks(iocb, from, &iolock);
        if (ret)
                goto out;
-       iov_iter_truncate(from, count);
+       count = iov_iter_count(from);
+       pos = iocb->ki_pos;
 
        if (mapping->nrpages) {
                ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
@@ -735,24 +737,22 @@ xfs_file_buffered_aio_write(
        ssize_t                 ret;
        int                     enospc = 0;
        int                     iolock = XFS_IOLOCK_EXCL;
-       loff_t                  pos = iocb->ki_pos;
-       size_t                  count = iov_iter_count(from);
 
        xfs_rw_ilock(ip, iolock);
 
-       ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
+       ret = xfs_file_aio_write_checks(iocb, from, &iolock);
        if (ret)
                goto out;
 
-       iov_iter_truncate(from, count);
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);
 
 write_retry:
-       trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
-       ret = generic_perform_write(file, from, pos);
+       trace_xfs_file_buffered_write(ip, iov_iter_count(from),
+                                     iocb->ki_pos, 0);
+       ret = generic_perform_write(file, from, iocb->ki_pos);
        if (likely(ret >= 0))
-               iocb->ki_pos = pos + ret;
+               iocb->ki_pos += ret;
 
        /*
         * If we hit a space limit, try to free up some lingering preallocated
@@ -1387,8 +1387,6 @@ xfs_file_llseek(
 
 const struct file_operations xfs_file_operations = {
        .llseek         = xfs_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = xfs_file_read_iter,
        .write_iter     = xfs_file_write_iter,
        .splice_read    = xfs_file_splice_read,
index 178525e5f430ba99f70a8f8250937501b46a650d..018afb264ac261ea5dea2f3afa547cf9a8f7a202 100644 (file)
@@ -58,8 +58,9 @@ struct af_alg_type {
 };
 
 struct af_alg_sgl {
-       struct scatterlist sg[ALG_MAX_PAGES];
+       struct scatterlist sg[ALG_MAX_PAGES + 1];
        struct page *pages[ALG_MAX_PAGES];
+       unsigned int npages;
 };
 
 int af_alg_register_type(const struct af_alg_type *type);
@@ -70,6 +71,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock);
 
 int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
 void af_alg_free_sg(struct af_alg_sgl *sgl);
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
 
 int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
 
index d9c92daa3944e43a13f285a7baaa1443868cce3d..9eb42dbc5582ace99283629f0905861ac820c7d5 100644 (file)
@@ -1,86 +1,23 @@
 #ifndef __LINUX__AIO_H
 #define __LINUX__AIO_H
 
-#include <linux/list.h>
-#include <linux/workqueue.h>
 #include <linux/aio_abi.h>
-#include <linux/uio.h>
-#include <linux/rcupdate.h>
-
-#include <linux/atomic.h>
 
 struct kioctx;
 struct kiocb;
+struct mm_struct;
 
 #define KIOCB_KEY              0
 
-/*
- * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
- * cancelled or completed (this makes a certain amount of sense because
- * successful cancellation - io_cancel() - does deliver the completion to
- * userspace).
- *
- * And since most things don't implement kiocb cancellation and we'd really like
- * kiocb completion to be lockless when possible, we use ki_cancel to
- * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
- * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
- */
-#define KIOCB_CANCELLED                ((void *) (~0ULL))
-
 typedef int (kiocb_cancel_fn)(struct kiocb *);
 
-struct kiocb {
-       struct file             *ki_filp;
-       struct kioctx           *ki_ctx;        /* NULL for sync ops */
-       kiocb_cancel_fn         *ki_cancel;
-       void                    *private;
-
-       union {
-               void __user             *user;
-               struct task_struct      *tsk;
-       } ki_obj;
-
-       __u64                   ki_user_data;   /* user's data for completion */
-       loff_t                  ki_pos;
-       size_t                  ki_nbytes;      /* copy of iocb->aio_nbytes */
-
-       struct list_head        ki_list;        /* the aio core uses this
-                                                * for cancellation */
-
-       /*
-        * If the aio_resfd field of the userspace iocb is not zero,
-        * this is the underlying eventfd context to deliver events to.
-        */
-       struct eventfd_ctx      *ki_eventfd;
-};
-
-static inline bool is_sync_kiocb(struct kiocb *kiocb)
-{
-       return kiocb->ki_ctx == NULL;
-}
-
-static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
-{
-       *kiocb = (struct kiocb) {
-                       .ki_ctx = NULL,
-                       .ki_filp = filp,
-                       .ki_obj.tsk = current,
-               };
-}
-
 /* prototypes */
 #ifdef CONFIG_AIO
-extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
-extern void aio_complete(struct kiocb *iocb, long res, long res2);
-struct mm_struct;
 extern void exit_aio(struct mm_struct *mm);
 extern long do_io_submit(aio_context_t ctx_id, long nr,
                         struct iocb __user *__user *iocbpp, bool compat);
 void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
 #else
-static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
-static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
-struct mm_struct;
 static inline void exit_aio(struct mm_struct *mm) { }
 static inline long do_io_submit(aio_context_t ctx_id, long nr,
                                struct iocb __user * __user *iocbpp,
@@ -89,11 +26,6 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
                                       kiocb_cancel_fn *cancel) { }
 #endif /* CONFIG_AIO */
 
-static inline struct kiocb *list_kiocb(struct list_head *h)
-{
-       return list_entry(h, struct kiocb, ki_list);
-}
-
 /* for sysctl: */
 extern unsigned long aio_nr;
 extern unsigned long aio_max_nr;
index 994739da827f26cb574e97411e92b82d3f23d340..e34f906647d39dce39985d1cfe836f42b689c556 100644 (file)
@@ -434,6 +434,27 @@ static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus,
        return bcma_find_core_unit(bus, coreid, 0);
 }
 
+#ifdef CONFIG_BCMA_HOST_PCI
+extern void bcma_host_pci_up(struct bcma_bus *bus);
+extern void bcma_host_pci_down(struct bcma_bus *bus);
+extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+                                struct bcma_device *core, bool enable);
+#else
+static inline void bcma_host_pci_up(struct bcma_bus *bus)
+{
+}
+static inline void bcma_host_pci_down(struct bcma_bus *bus)
+{
+}
+static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+                                       struct bcma_device *core, bool enable)
+{
+       if (bus->hosttype == BCMA_HOSTTYPE_PCI)
+               return -ENOTSUPP;
+       return 0;
+}
+#endif
+
 extern bool bcma_core_is_enabled(struct bcma_device *core);
 extern void bcma_core_disable(struct bcma_device *core, u32 flags);
 extern int bcma_core_enable(struct bcma_device *core, u32 flags);
index db6fa217f98bf3f1276ac659c61e50099da739e4..6cceedf65ca27d787f995980cf716de2d0a2be47 100644 (file)
@@ -663,14 +663,6 @@ struct bcma_drv_cc_b {
 #define bcma_cc_maskset32(cc, offset, mask, set) \
        bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
 
-extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
-extern void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
-
-extern void bcma_chipco_suspend(struct bcma_drv_cc *cc);
-extern void bcma_chipco_resume(struct bcma_drv_cc *cc);
-
-void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
-
 extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
 
 extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
@@ -690,9 +682,6 @@ u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value);
 u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value);
 
 /* PMU support */
-extern void bcma_pmu_init(struct bcma_drv_cc *cc);
-extern void bcma_pmu_early_init(struct bcma_drv_cc *cc);
-
 extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset,
                                  u32 value);
 extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset,
index 4dd1f33e36a20accc10d1aa1f1d66cfd5203f49b..4354d4ea6713da3d1121cde7bd8a0c0325626243 100644 (file)
@@ -91,10 +91,4 @@ struct bcma_drv_gmac_cmn {
 #define gmac_cmn_write16(gc, offset, val)      bcma_write16((gc)->core, offset, val)
 #define gmac_cmn_write32(gc, offset, val)      bcma_write32((gc)->core, offset, val)
 
-#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
-extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
-#else
-static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { }
-#endif
-
 #endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */
index 0b3b32aeeb8af8c76b9ed23ff0017ef79e92e635..8eea7f9e33b45d1665aed004f874d9b931ad2769 100644 (file)
@@ -39,21 +39,6 @@ struct bcma_drv_mips {
        u8 early_setup_done:1;
 };
 
-#ifdef CONFIG_BCMA_DRIVER_MIPS
-extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
-extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
-
-extern unsigned int bcma_core_mips_irq(struct bcma_device *dev);
-#else
-static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
-static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
-
-static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
-{
-       return 0;
-}
-#endif
-
 extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore);
 
 #endif /* LINUX_BCMA_DRIVER_MIPS_H_ */
index 3f809ae372c4aa702cf13f350840fd80552ee1ca..5ba6918ca20bc9d3bc30993c21b7ef10a6c42243 100644 (file)
@@ -238,13 +238,13 @@ struct bcma_drv_pci {
 #define pcicore_write16(pc, offset, val)       bcma_write16((pc)->core, offset, val)
 #define pcicore_write32(pc, offset, val)       bcma_write32((pc)->core, offset, val)
 
-extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
-extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
-extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
-                                struct bcma_device *core, bool enable);
-extern void bcma_core_pci_up(struct bcma_bus *bus);
-extern void bcma_core_pci_down(struct bcma_bus *bus);
+#ifdef CONFIG_BCMA_DRIVER_PCI
 extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
+#else
+static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
+{
+}
+#endif
 
 extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
 extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
index 5988b05781c336d21c047339d8f7027bf3eaa068..31e6d17ab7985c4800110121ceca7209a38e4838 100644 (file)
 
 struct bcma_drv_pcie2 {
        struct bcma_device *core;
+
+       u16 reqsize;
 };
 
 #define pcie2_read16(pcie2, offset)            bcma_read16((pcie2)->core, offset)
@@ -153,6 +155,4 @@ struct bcma_drv_pcie2 {
 #define pcie2_set32(pcie2, offset, set)                bcma_set32((pcie2)->core, offset, set)
 #define pcie2_mask32(pcie2, offset, mask)      bcma_mask32((pcie2)->core, offset, mask)
 
-void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
-
 #endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */
index bbfceb7564523bc5e17037e25663185aee02311e..d5cda067115aaaa0f2b00a41c5a3d8c68ad114b3 100644 (file)
@@ -32,23 +32,19 @@ struct bpf_map {
        u32 key_size;
        u32 value_size;
        u32 max_entries;
-       struct bpf_map_ops *ops;
+       const struct bpf_map_ops *ops;
        struct work_struct work;
 };
 
 struct bpf_map_type_list {
        struct list_head list_node;
-       struct bpf_map_ops *ops;
+       const struct bpf_map_ops *ops;
        enum bpf_map_type type;
 };
 
-void bpf_register_map_type(struct bpf_map_type_list *tl);
-void bpf_map_put(struct bpf_map *map);
-struct bpf_map *bpf_map_get(struct fd f);
-
 /* function argument constraints */
 enum bpf_arg_type {
-       ARG_ANYTHING = 0,       /* any argument is ok */
+       ARG_DONTCARE = 0,       /* unused argument in helper function */
 
        /* the following constraints used to prototype
         * bpf_map_lookup/update/delete_elem() functions
@@ -62,6 +58,9 @@ enum bpf_arg_type {
         */
        ARG_PTR_TO_STACK,       /* any pointer to eBPF program stack */
        ARG_CONST_STACK_SIZE,   /* number of bytes accessed from stack */
+
+       ARG_PTR_TO_CTX,         /* pointer to context */
+       ARG_ANYTHING,           /* any (initialized) argument is ok */
 };
 
 /* type of values returned from helper functions */
@@ -105,41 +104,61 @@ struct bpf_verifier_ops {
         * with 'type' (read or write) is allowed
         */
        bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+
+       u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off,
+                                 struct bpf_insn *insn);
 };
 
 struct bpf_prog_type_list {
        struct list_head list_node;
-       struct bpf_verifier_ops *ops;
+       const struct bpf_verifier_ops *ops;
        enum bpf_prog_type type;
 };
 
-void bpf_register_prog_type(struct bpf_prog_type_list *tl);
-
 struct bpf_prog;
 
 struct bpf_prog_aux {
        atomic_t refcnt;
-       bool is_gpl_compatible;
-       enum bpf_prog_type prog_type;
-       struct bpf_verifier_ops *ops;
-       struct bpf_map **used_maps;
        u32 used_map_cnt;
+       const struct bpf_verifier_ops *ops;
+       struct bpf_map **used_maps;
        struct bpf_prog *prog;
        struct work_struct work;
 };
 
 #ifdef CONFIG_BPF_SYSCALL
-void bpf_prog_put(struct bpf_prog *prog);
-#else
-static inline void bpf_prog_put(struct bpf_prog *prog) {}
-#endif
+void bpf_register_prog_type(struct bpf_prog_type_list *tl);
+void bpf_register_map_type(struct bpf_map_type_list *tl);
+
 struct bpf_prog *bpf_prog_get(u32 ufd);
+void bpf_prog_put(struct bpf_prog *prog);
+
+struct bpf_map *bpf_map_get(struct fd f);
+void bpf_map_put(struct bpf_map *map);
+
 /* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+#else
+static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
+{
+}
+
+static inline struct bpf_prog *bpf_prog_get(u32 ufd)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_prog_put(struct bpf_prog *prog)
+{
+}
+#endif /* CONFIG_BPF_SYSCALL */
 
 /* verifier prototypes for helper functions called from eBPF programs */
-extern struct bpf_func_proto bpf_map_lookup_elem_proto;
-extern struct bpf_func_proto bpf_map_update_elem_proto;
-extern struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
+extern const struct bpf_func_proto bpf_map_update_elem_proto;
+extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+
+extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
+extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
 
 #endif /* _LINUX_BPF_H */
index 7ccd928cc1f29b71afc7b7cf5cf1ea6a855cd70f..cab60661752237f736c817588d1d0e1a01469cdf 100644 (file)
@@ -19,6 +19,7 @@
 #define PHY_ID_BCM7425                 0x03625e60
 #define PHY_ID_BCM7429                 0x600d8730
 #define PHY_ID_BCM7439                 0x600d8480
+#define PHY_ID_BCM7439_2               0xae025080
 #define PHY_ID_BCM7445                 0x600d8510
 
 #define PHY_BCM_OUI_MASK               0xfffffc00
index c05ff0f9f9a55afec230435183dcce0d93bd5725..c3a9c8fc60fa620395751b994e117dc1ac9f0712 100644 (file)
@@ -61,6 +61,8 @@ struct can_priv {
        char tx_led_trig_name[CAN_LED_NAME_SZ];
        struct led_trigger *rx_led_trig;
        char rx_led_trig_name[CAN_LED_NAME_SZ];
+       struct led_trigger *rxtx_led_trig;
+       char rxtx_led_trig_name[CAN_LED_NAME_SZ];
 #endif
 };
 
index e0475c5cbb92aac6fe1163bc2ac65a4664048152..146de4506d211285b82a5b3554154c4e6fc4a8a1 100644 (file)
@@ -21,8 +21,10 @@ enum can_led_event {
 
 #ifdef CONFIG_CAN_LEDS
 
-/* keep space for interface name + "-tx"/"-rx" suffix and null terminator */
-#define CAN_LED_NAME_SZ (IFNAMSIZ + 4)
+/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
+ * suffix and null terminator
+ */
+#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
 
 void can_led_event(struct net_device *netdev, enum can_led_event event);
 void devm_can_led_init(struct net_device *netdev);
index cc00d15c6107be8893b024e3eabb2ef1ba243016..b6a52a4b457aaaff2db3f55e2fd3bb78d07a6ab1 100644 (file)
@@ -44,16 +44,11 @@ static inline void can_skb_reserve(struct sk_buff *skb)
        skb_reserve(skb, sizeof(struct can_skb_priv));
 }
 
-static inline void can_skb_destructor(struct sk_buff *skb)
-{
-       sock_put(skb->sk);
-}
-
 static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
 {
        if (sk) {
                sock_hold(sk);
-               skb->destructor = can_skb_destructor;
+               skb->destructor = sock_efree;
                skb->sk = sk;
        }
 }
index 439ff698000aa4ef81f5cef53e69095ce9c9fc80..221025423e6c993b70918e1186dabb7ff49a00e2 100644 (file)
@@ -43,6 +43,7 @@ enum dccp_state {
        DCCP_CLOSING         = TCP_CLOSING,
        DCCP_TIME_WAIT       = TCP_TIME_WAIT,
        DCCP_CLOSED          = TCP_CLOSE,
+       DCCP_NEW_SYN_RECV    = TCP_NEW_SYN_RECV,
        DCCP_PARTOPEN        = TCP_MAX_STATES,
        DCCP_PASSIVE_CLOSEREQ,                  /* clients receiving CloseReq */
        DCCP_MAX_STATES
@@ -57,6 +58,7 @@ enum {
        DCCPF_CLOSING         = TCPF_CLOSING,
        DCCPF_TIME_WAIT       = TCPF_TIME_WAIT,
        DCCPF_CLOSED          = TCPF_CLOSE,
+       DCCPF_NEW_SYN_RECV    = TCPF_NEW_SYN_RECV,
        DCCPF_PARTOPEN        = (1 << DCCP_PARTOPEN),
 };
 
@@ -317,6 +319,6 @@ static inline const char *dccp_role(const struct sock *sk)
        return NULL;
 }
 
-extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+extern void dccp_syn_ack_timeout(const struct request_sock *req);
 
 #endif /* _LINUX_DCCP_H */
index 1d869d185a0dbdb03b33de2b8c0af33c7ea865fa..606563ef8a725e54d27db855c63dad948f3ce815 100644 (file)
@@ -35,7 +35,6 @@ extern const struct header_ops eth_header_ops;
 
 int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
               const void *daddr, const void *saddr, unsigned len);
-int eth_rebuild_header(struct sk_buff *skb);
 int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
 int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
                     __be16 type);
index caac2087a4d5e7479e272b6dc5da23d7c90c9fd7..fa11b3a367be54c4f73427d3c323c9c4936aca3a 100644 (file)
@@ -145,8 +145,6 @@ struct bpf_prog_aux;
                .off   = 0,                                     \
                .imm   = ((__u64) (IMM)) >> 32 })
 
-#define BPF_PSEUDO_MAP_FD      1
-
 /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
 #define BPF_LD_MAP_FD(DST, MAP_FD)                             \
        BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
@@ -310,9 +308,11 @@ struct bpf_binary_header {
 struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        bool                    jited;          /* Is our filter JIT'ed? */
+       bool                    gpl_compatible; /* Is our filter GPL compatible? */
        u32                     len;            /* Number of filter blocks */
-       struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
+       enum bpf_prog_type      type;           /* Type of BPF program */
        struct bpf_prog_aux     *aux;           /* Auxiliary fields */
+       struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct bpf_insn *filter);
        /* Instructions for interpreter */
@@ -454,6 +454,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
                BPF_ANCILLARY(VLAN_TAG_PRESENT);
                BPF_ANCILLARY(PAY_OFFSET);
                BPF_ANCILLARY(RANDOM);
+               BPF_ANCILLARY(VLAN_TPID);
                }
                /* Fallthrough. */
        default:
index 52cc4492cb3a1bcb979124b097fefdcfbc89e390..c7b21db7782fd6885ea8b00b6c9fdc928cde32a1 100644 (file)
@@ -314,6 +314,28 @@ struct page;
 struct address_space;
 struct writeback_control;
 
+#define IOCB_EVENTFD           (1 << 0)
+
+struct kiocb {
+       struct file             *ki_filp;
+       loff_t                  ki_pos;
+       void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+       void                    *private;
+       int                     ki_flags;
+};
+
+static inline bool is_sync_kiocb(struct kiocb *kiocb)
+{
+       return kiocb->ki_complete == NULL;
+}
+
+static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+{
+       *kiocb = (struct kiocb) {
+               .ki_filp = filp,
+       };
+}
+
 /*
  * "descriptor" for what we're up to with a read.
  * This allows us to use the same read code yet
@@ -361,7 +383,7 @@ struct address_space_operations {
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, gfp_t);
        void (*freepage)(struct page *);
-       ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+       ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
        /*
         * migrate the contents of a page to the specified target. If
         * migrate_mode is MIGRATE_ASYNC, it must not block.
@@ -1540,8 +1562,6 @@ struct file_operations {
        loff_t (*llseek) (struct file *, loff_t, int);
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
-       ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
-       ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
@@ -1617,6 +1637,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
                              struct iovec **ret_pointer);
 
 extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
+extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *);
 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
 extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
@@ -2145,7 +2166,7 @@ struct filename {
        const __user char       *uptr;  /* original userland pointer */
        struct audit_names      *aname;
        int                     refcnt;
-       bool                    separate; /* should "name" be freed? */
+       const char              iname[];
 };
 
 extern long vfs_truncate(struct path *, loff_t);
@@ -2545,16 +2566,12 @@ extern int sb_min_blocksize(struct super_block *, int);
 
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
-int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
+int generic_write_checks(struct file *file, loff_t *pos, size_t *count);
 extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t);
 extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
-extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
 
 ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos);
 ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos);
@@ -2592,8 +2609,8 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
 extern int generic_file_open(struct inode * inode, struct file * filp);
 extern int nonseekable_open(struct inode * inode, struct file * filp);
 
-ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *,
-               loff_t, get_block_t, dio_iodone_t, int flags);
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+                 get_block_t, dio_iodone_t, int flags);
 int dax_clear_blocks(struct inode *, sector_t block, long size);
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
 int dax_truncate_page(struct inode *, loff_t from, get_block_t);
@@ -2617,16 +2634,18 @@ enum {
 
 void dio_end_io(struct bio *bio, int error);
 
-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-       get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags);
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+                            struct block_device *bdev, struct iov_iter *iter,
+                            loff_t offset, get_block_t get_block,
+                            dio_iodone_t end_io, dio_submit_t submit_io,
+                            int flags);
 
-static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
-               struct inode *inode, struct iov_iter *iter, loff_t offset,
-               get_block_t get_block)
+static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
+                                        struct inode *inode,
+                                        struct iov_iter *iter, loff_t offset,
+                                        get_block_t get_block)
 {
-       return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+       return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
                                    offset, get_block, NULL, NULL,
                                    DIO_LOCKING | DIO_SKIP_HOLES);
 }
index 6e82d888287c280e096bc4c8c4cd585f786fb507..8872ca103d0699bb7c0c6ab2a898302d920b41dd 100644 (file)
@@ -28,7 +28,9 @@
 #include <asm/byteorder.h>
 
 #define IEEE802154_MTU                 127
-#define IEEE802154_MIN_PSDU_LEN                5
+#define IEEE802154_ACK_PSDU_LEN                5
+#define IEEE802154_MIN_PSDU_LEN                9
+#define IEEE802154_FCS_LEN             2
 
 #define IEEE802154_PAN_ID_BROADCAST    0xffff
 #define IEEE802154_ADDR_SHORT_BROADCAST        0xffff
@@ -38,6 +40,7 @@
 
 #define IEEE802154_LIFS_PERIOD         40
 #define IEEE802154_SIFS_PERIOD         12
+#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
 
 #define IEEE802154_MAX_CHANNEL         26
 #define IEEE802154_MAX_PAGE            31
@@ -204,11 +207,18 @@ enum {
 
 /**
  * ieee802154_is_valid_psdu_len - check if psdu len is valid
+ * available lengths:
+ *     0-4     Reserved
+ *     5       MPDU (Acknowledgment)
+ *     6-8     Reserved
+ *     9-127   MPDU
+ *
  * @len: psdu len with (MHR + payload + MFR)
  */
 static inline bool ieee802154_is_valid_psdu_len(const u8 len)
 {
-       return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU);
+       return (len == IEEE802154_ACK_PSDU_LEN ||
+               (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU));
 }
 
 /**
index a57bca2ea97e51058283dfa9305aff9f9244b12e..dad8b00beed27220856985c984e620d88cafd736 100644 (file)
@@ -44,6 +44,7 @@ struct br_ip_list {
 #define BR_PROMISC             BIT(7)
 #define BR_PROXYARP            BIT(8)
 #define BR_LEARNING_SYNC       BIT(9)
+#define BR_PROXYARP_WIFI       BIT(10)
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
index aff7ad8a4ea3cdea45daca2049a7b83dc8151cc3..66a7d7600f4343a809252ba809d92bca5bc7e8f9 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/netdevice.h>
 #include <linux/ppp_channel.h>
 #include <linux/skbuff.h>
+#include <linux/workqueue.h>
 #include <uapi/linux/if_pppox.h>
 
 static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb)
@@ -32,6 +33,7 @@ struct pppoe_opt {
        struct pppoe_addr       pa;       /* what this socket is bound to*/
        struct sockaddr_pppox   relay;    /* what socket data will be
                                             relayed to (PPPoE relaying) */
+       struct work_struct      padt_work;/* Work item for handling PADT */
 };
 
 struct pptp_opt {
index b11b28a30b9ee78e1f600cc076736a72dc624c72..920e4457ce6eab1541a9322595fe2894559ce16f 100644 (file)
@@ -561,4 +561,71 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
                skb->protocol = htons(ETH_P_802_2);
 }
 
+/**
+ * skb_vlan_tagged - check if skb is vlan tagged.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged, regardless of whether it is hardware
+ * accelerated or not.
+ */
+static inline bool skb_vlan_tagged(const struct sk_buff *skb)
+{
+       if (!skb_vlan_tag_present(skb) &&
+           likely(skb->protocol != htons(ETH_P_8021Q) &&
+                  skb->protocol != htons(ETH_P_8021AD)))
+               return false;
+
+       return true;
+}
+
+/**
+ * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged with multiple vlan headers, regardless
+ * of whether it is hardware accelerated or not.
+ */
+static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+{
+       __be16 protocol = skb->protocol;
+
+       if (!skb_vlan_tag_present(skb)) {
+               struct vlan_ethhdr *veh;
+
+               if (likely(protocol != htons(ETH_P_8021Q) &&
+                          protocol != htons(ETH_P_8021AD)))
+                       return false;
+
+               veh = (struct vlan_ethhdr *)skb->data;
+               protocol = veh->h_vlan_encapsulated_proto;
+       }
+
+       if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
+               return false;
+
+       return true;
+}
+
+/**
+ * vlan_features_check - drop unsafe features for skb with multiple tags.
+ * @skb: skbuff to query
+ * @features: features to be checked
+ *
+ * Returns features without unsafe ones if the skb has multiple tags.
+ */
+static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+                                                   netdev_features_t features)
+{
+       if (skb_vlan_tagged_multi(skb))
+               features = netdev_intersect_features(features,
+                                                    NETIF_F_SG |
+                                                    NETIF_F_HIGHDMA |
+                                                    NETIF_F_FRAGLIST |
+                                                    NETIF_F_GEN_CSUM |
+                                                    NETIF_F_HW_VLAN_CTAG_TX |
+                                                    NETIF_F_HW_VLAN_STAG_TX);
+
+       return features;
+}
+
 #endif /* !(_LINUX_IF_VLAN_H_) */
index 46da02410a09691c5e624df266cfc0c105fce6d9..ac48b10c9395f992403f1377c63a3be47d90804a 100644 (file)
@@ -11,33 +11,34 @@ struct sk_buff;
 struct netlink_callback;
 
 struct inet_diag_handler {
-       void                    (*dump)(struct sk_buff *skb,
-                                       struct netlink_callback *cb,
-                                       struct inet_diag_req_v2 *r,
-                                       struct nlattr *bc);
-
-       int                     (*dump_one)(struct sk_buff *in_skb,
-                                       const struct nlmsghdr *nlh,
-                                       struct inet_diag_req_v2 *req);
-
-       void                    (*idiag_get_info)(struct sock *sk,
-                                                 struct inet_diag_msg *r,
-                                                 void *info);
-       __u16                   idiag_type;
+       void            (*dump)(struct sk_buff *skb,
+                               struct netlink_callback *cb,
+                               const struct inet_diag_req_v2 *r,
+                               struct nlattr *bc);
+
+       int             (*dump_one)(struct sk_buff *in_skb,
+                                   const struct nlmsghdr *nlh,
+                                   const struct inet_diag_req_v2 *req);
+
+       void            (*idiag_get_info)(struct sock *sk,
+                                         struct inet_diag_msg *r,
+                                         void *info);
+       __u16           idiag_type;
 };
 
 struct inet_connection_sock;
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
-                             struct sk_buff *skb, struct inet_diag_req_v2 *req,
-                             struct user_namespace *user_ns,
-                             u32 pid, u32 seq, u16 nlmsg_flags,
-                             const struct nlmsghdr *unlh);
+                     struct sk_buff *skb, const struct inet_diag_req_v2 *req,
+                     struct user_namespace *user_ns,
+                     u32 pid, u32 seq, u16 nlmsg_flags,
+                     const struct nlmsghdr *unlh);
 void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
-               struct netlink_callback *cb, struct inet_diag_req_v2 *r,
-               struct nlattr *bc);
+                        struct netlink_callback *cb,
+                        const struct inet_diag_req_v2 *r,
+                        struct nlattr *bc);
 int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
-               struct sk_buff *in_skb, const struct nlmsghdr *nlh,
-               struct inet_diag_req_v2 *req);
+                           struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+                           const struct inet_diag_req_v2 *req);
 
 int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
 
index 4d5169f5d7d1844d99d2af6c8d2cb89ad9879bdf..82806c60aa4273d67ff5592dfc11e4eaa57c51d7 100644 (file)
@@ -53,6 +53,10 @@ struct ipv6_devconf {
        __s32           ndisc_notify;
        __s32           suppress_frag_ndisc;
        __s32           accept_ra_mtu;
+       struct ipv6_stable_secret {
+               bool initialized;
+               struct in6_addr secret;
+       } stable_secret;
        void            *sysctl;
 };
 
index 47cb09edec1a613821b734a7b4a0aa1def2ab0ae..348c6f47e4cc36681e55ff4800a4d2a4e052f954 100644 (file)
@@ -145,11 +145,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
 }
 
 
-/* jhash_3words - hash exactly 3, 2 or 1 word(s) */
-static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
+static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
 {
-       a += JHASH_INITVAL;
-       b += JHASH_INITVAL;
+       a += initval;
+       b += initval;
        c += initval;
 
        __jhash_final(a, b, c);
@@ -157,14 +157,19 @@ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
        return c;
 }
 
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+       return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
+}
+
 static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
 {
-       return jhash_3words(a, b, 0, initval);
+       return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
 }
 
 static inline u32 jhash_1word(u32 a, u32 initval)
 {
-       return jhash_3words(a, 0, 0, initval);
+       return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
 }
 
 #endif /* _LINUX_JHASH_H */
index 7bf01d779b4532a5a8d168d0ff91fd08d5228e02..1ce79a7f1daa18868adfe14598c35206382f58a5 100644 (file)
@@ -4,5 +4,6 @@
 #include <linux/compiler.h>
 
 unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
+unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__;
 
 #endif /* _LCM_H */
index 7b6d4e9ff603828181239f5f64cbdf3a6d2cd282..f62e7cf227c61c9510e96afda8482e8f5b7b606b 100644 (file)
@@ -68,6 +68,8 @@ enum {
        MLX4_CMD_UNMAP_ICM_AUX   = 0xffb,
        MLX4_CMD_SET_ICM_SIZE    = 0xffd,
        MLX4_CMD_ACCESS_REG      = 0x3b,
+       MLX4_CMD_ALLOCATE_VPP    = 0x80,
+       MLX4_CMD_SET_VPORT_QOS   = 0x81,
 
        /*master notify fw on finish for slave's flr*/
        MLX4_CMD_INFORM_FLR_DONE = 0x5b,
@@ -163,6 +165,9 @@ enum {
        MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
        MLX4_QP_FLOW_STEERING_DETACH = 0x66,
        MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64,
+
+       /* Update and read QCN parameters */
+       MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68,
 };
 
 enum {
@@ -183,7 +188,14 @@ enum {
 };
 
 enum {
-       /* set port opcode modifiers */
+       /* Set port opcode modifiers */
+       MLX4_SET_PORT_IB_OPCODE         = 0x0,
+       MLX4_SET_PORT_ETH_OPCODE        = 0x1,
+       MLX4_SET_PORT_BEACON_OPCODE     = 0x4,
+};
+
+enum {
+       /* Set port Ethernet input modifiers */
        MLX4_SET_PORT_GENERAL   = 0x0,
        MLX4_SET_PORT_RQP_CALC  = 0x1,
        MLX4_SET_PORT_MAC_TABLE = 0x2,
@@ -233,6 +245,16 @@ struct mlx4_config_dev_params {
        u8      rx_csum_flags_port_2;
 };
 
+enum mlx4_en_congestion_control_algorithm {
+       MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0,
+};
+
+enum mlx4_en_congestion_control_opmod {
+       MLX4_CONGESTION_CONTROL_GET_PARAMS,
+       MLX4_CONGESTION_CONTROL_GET_STATISTICS,
+       MLX4_CONGESTION_CONTROL_SET_PARAMS = 4,
+};
+
 struct mlx4_dev;
 
 struct mlx4_cmd_mailbox {
@@ -281,6 +303,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
 u32 mlx4_comm_get_version(void);
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
+                    int max_tx_rate);
 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
 int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
 int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
index e4ebff7e9d02fba498e8d5ae9792666e45c3fa3b..f9ce34bec45b1d615bb1b191bffd867a23dd559a 100644 (file)
@@ -49,8 +49,6 @@
 #define MSIX_LEGACY_SZ         4
 #define MIN_MSIX_P_PORT                5
 
-#define MLX4_NUM_UP                    8
-#define MLX4_NUM_TC                    8
 #define MLX4_MAX_100M_UNITS_VAL                255     /*
                                                 * work around: can't set values
                                                 * greater then this value when
@@ -174,6 +172,7 @@ enum {
        MLX4_DEV_CAP_FLAG_VEP_UC_STEER  = 1LL << 41,
        MLX4_DEV_CAP_FLAG_VEP_MC_STEER  = 1LL << 42,
        MLX4_DEV_CAP_FLAG_COUNTERS      = 1LL << 48,
+       MLX4_DEV_CAP_FLAG_RSS_IP_FRAG   = 1LL << 52,
        MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53,
        MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
        MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
@@ -203,7 +202,14 @@ enum {
        MLX4_DEV_CAP_FLAG2_80_VFS               = 1LL <<  18,
        MLX4_DEV_CAP_FLAG2_FS_A0                = 1LL <<  19,
        MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
-       MLX4_DEV_CAP_FLAG2_PORT_REMAP           = 1LL <<  21
+       MLX4_DEV_CAP_FLAG2_PORT_REMAP           = 1LL <<  21,
+       MLX4_DEV_CAP_FLAG2_QCN                  = 1LL <<  22,
+       MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT        = 1LL <<  23,
+       MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN         = 1LL <<  24,
+       MLX4_DEV_CAP_FLAG2_QOS_VPP              = 1LL <<  25,
+       MLX4_DEV_CAP_FLAG2_ETS_CFG              = 1LL <<  26,
+       MLX4_DEV_CAP_FLAG2_PORT_BEACON          = 1LL <<  27,
+       MLX4_DEV_CAP_FLAG2_IGNORE_FCS           = 1LL <<  28,
 };
 
 enum {
@@ -449,6 +455,21 @@ enum mlx4_module_id {
        MLX4_MODULE_ID_QSFP28           = 0x11,
 };
 
+enum { /* rl */
+       MLX4_QP_RATE_LIMIT_NONE         = 0,
+       MLX4_QP_RATE_LIMIT_KBS          = 1,
+       MLX4_QP_RATE_LIMIT_MBS          = 2,
+       MLX4_QP_RATE_LIMIT_GBS          = 3
+};
+
+struct mlx4_rate_limit_caps {
+       u16     num_rates; /* Number of different rates */
+       u8      min_unit;
+       u16     min_val;
+       u8      max_unit;
+       u16     max_val;
+};
+
 static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 {
        return (major << 32) | (minor << 16) | subminor;
@@ -564,6 +585,7 @@ struct mlx4_caps {
        u32                     dmfs_high_rate_qpn_base;
        u32                     dmfs_high_rate_qpn_range;
        u32                     vf_caps;
+       struct mlx4_rate_limit_caps rl_caps;
 };
 
 struct mlx4_buf_list {
@@ -982,6 +1004,11 @@ static inline int mlx4_is_slave(struct mlx4_dev *dev)
        return dev->flags & MLX4_FLAG_SLAVE;
 }
 
+static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
+{
+       return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
+}
+
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                   struct mlx4_buf *buf, gfp_t gfp);
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -1282,14 +1309,13 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
 int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
-void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
                          u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
                           u8 promisc);
-int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
-int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
-               u8 *pg, u16 *ratelimit);
+int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
+int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
+                           u8 ignore_fcs_value);
 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
index 551f85456c11574a144bf64d1c38ec8031313b3e..6fed539e54569c3f0701632a8a17e9bdf16a53d6 100644 (file)
@@ -207,14 +207,17 @@ struct mlx4_qp_context {
        __be32                  msn;
        __be16                  rq_wqe_counter;
        __be16                  sq_wqe_counter;
-       u32                     reserved3[2];
+       u32                     reserved3;
+       __be16                  rate_limit_params;
+       u8                      reserved4;
+       u8                      qos_vport;
        __be32                  param3;
        __be32                  nummmcpeers_basemkey;
        u8                      log_page_size;
-       u8                      reserved4[2];
+       u8                      reserved5[2];
        u8                      mtt_base_addr_h;
        __be32                  mtt_base_addr_l;
-       u32                     reserved5[10];
+       u32                     reserved6[10];
 };
 
 struct mlx4_update_qp_context {
@@ -229,6 +232,8 @@ struct mlx4_update_qp_context {
 enum {
        MLX4_UPD_QP_MASK_PM_STATE       = 32,
        MLX4_UPD_QP_MASK_VSD            = 33,
+       MLX4_UPD_QP_MASK_QOS_VPP        = 34,
+       MLX4_UPD_QP_MASK_RATE_LIMIT     = 35,
 };
 
 enum {
@@ -428,7 +433,9 @@ struct mlx4_wqe_inline_seg {
 enum mlx4_update_qp_attr {
        MLX4_UPDATE_QP_SMAC             = 1 << 0,
        MLX4_UPDATE_QP_VSD              = 1 << 1,
-       MLX4_UPDATE_QP_SUPPORTED_ATTRS  = (1 << 2) - 1
+       MLX4_UPDATE_QP_RATE_LIMIT       = 1 << 2,
+       MLX4_UPDATE_QP_QOS_VPORT        = 1 << 3,
+       MLX4_UPDATE_QP_SUPPORTED_ATTRS  = (1 << 4) - 1
 };
 
 enum mlx4_update_qp_params_flags {
@@ -437,7 +444,10 @@ enum mlx4_update_qp_params_flags {
 
 struct mlx4_update_qp_params {
        u8      smac_index;
+       u8      qos_vport;
        u32     flags;
+       u16     rate_unit;
+       u16     rate_val;
 };
 
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
index 2826a4b6071ef2c0c06cf6cf6fbb63e90095c04d..68cd08f02c2f62ca1d5a036a9f476396c6fa6e9e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index f6b17ac601bda06533afb7bc4c7d80a692ff7bc4..2695ced222df23b56df42252fb7319c7d7d8b157 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -137,14 +137,15 @@ enum {
 
 static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
                               void __iomem *uar_page,
-                              spinlock_t *doorbell_lock)
+                              spinlock_t *doorbell_lock,
+                              u32 cons_index)
 {
        __be32 doorbell[2];
        u32 sn;
        u32 ci;
 
        sn = cq->arm_sn & 3;
-       ci = cq->cons_index & 0xffffff;
+       ci = cons_index & 0xffffff;
 
        *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
 
index 4e5bd813bb9a7d9142866857ff3c4be73f956e6e..abf65c7904214b75f5326a7f576df3b8e2f0a9d8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 163a818411e71742c332417c0bdfb811669f663f..afc78a3f4462e3f2eb2e350abc010ae39a181e7f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 166d9315fe4b565bdd8487da66559f6cbfe19462..9a90e7523dc24d2f7f29467023c8845cbf50cff7 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -232,6 +232,9 @@ struct mlx5_cmd_stats {
 };
 
 struct mlx5_cmd {
+       void           *cmd_alloc_buf;
+       dma_addr_t      alloc_dma;
+       int             alloc_size;
        void           *cmd_buf;
        dma_addr_t      dma;
        u16             cmdif_rev;
@@ -407,7 +410,7 @@ struct mlx5_core_srq {
 struct mlx5_eq_table {
        void __iomem           *update_ci;
        void __iomem           *update_arm_ci;
-       struct list_head       *comp_eq_head;
+       struct list_head        comp_eqs_list;
        struct mlx5_eq          pages_eq;
        struct mlx5_eq          async_eq;
        struct mlx5_eq          cmd_eq;
@@ -722,6 +725,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
 int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 
@@ -777,14 +781,22 @@ enum {
        MAX_MR_CACHE_ENTRIES    = 16,
 };
 
+enum {
+       MLX5_INTERFACE_PROTOCOL_IB  = 0,
+       MLX5_INTERFACE_PROTOCOL_ETH = 1,
+};
+
 struct mlx5_interface {
        void *                  (*add)(struct mlx5_core_dev *dev);
        void                    (*remove)(struct mlx5_core_dev *dev, void *context);
        void                    (*event)(struct mlx5_core_dev *dev, void *context,
                                         enum mlx5_dev_event event, unsigned long param);
+       void *                  (*get_dev)(void *context);
+       int                     protocol;
        struct list_head        list;
 };
 
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
 int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
 
index 5f48b8f592c51a9aece4baa2755af9757b9976fa..cb3ad17edd1f5959b0499b82899ec95ada191025 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 61f7a342d1bfd1cc4f102d1b75be3d8a977e28d0..310b5f7fd6ae52101665c9f3dd6e042b6a4ceb9e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index e1a363a336634f7506880c086a1c2e2f1689bb7e..f43ed054a3e0904c2b99a844fdd79964c633fe9a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 996807963716770486d9733d34879273c2b4d77b..83430f2ea757df9ad92b054b36db1306562f9e59 100644 (file)
@@ -33,6 +33,8 @@
 #define SDIO_DEVICE_ID_BROADCOM_43341          0xa94d
 #define SDIO_DEVICE_ID_BROADCOM_4335_4339      0x4335
 #define SDIO_DEVICE_ID_BROADCOM_43362          0xa962
+#define SDIO_DEVICE_ID_BROADCOM_43430          0xa9a6
+#define SDIO_DEVICE_ID_BROADCOM_4345           0x4345
 #define SDIO_DEVICE_ID_BROADCOM_4354           0x4354
 
 #define SDIO_VENDOR_ID_INTEL                   0x0089
index 17d83393afcc4337d50f00cfa837030d9429c6f8..738ea48be889e670275616bae5063259ca3d61d9 100644 (file)
@@ -120,7 +120,6 @@ struct socket {
 
 struct vm_area_struct;
 struct page;
-struct kiocb;
 struct sockaddr;
 struct msghdr;
 struct module;
@@ -162,8 +161,8 @@ struct proto_ops {
        int             (*compat_getsockopt)(struct socket *sock, int level,
                                      int optname, char __user *optval, int __user *optlen);
 #endif
-       int             (*sendmsg)   (struct kiocb *iocb, struct socket *sock,
-                                     struct msghdr *m, size_t total_len);
+       int             (*sendmsg)   (struct socket *sock, struct msghdr *m,
+                                     size_t total_len);
        /* Notes for implementing recvmsg:
         * ===============================
         * msg->msg_namelen should get updated by the recvmsg handlers
@@ -172,9 +171,8 @@ struct proto_ops {
         * handlers can assume that msg.msg_name is either NULL or has
         * a minimum size of sizeof(struct sockaddr_storage).
         */
-       int             (*recvmsg)   (struct kiocb *iocb, struct socket *sock,
-                                     struct msghdr *m, size_t total_len,
-                                     int flags);
+       int             (*recvmsg)   (struct socket *sock, struct msghdr *m,
+                                     size_t total_len, int flags);
        int             (*mmap)      (struct file *file, struct socket *sock,
                                      struct vm_area_struct * vma);
        ssize_t         (*sendpage)  (struct socket *sock, struct page *page,
@@ -213,7 +211,7 @@ int sock_create(int family, int type, int proto, struct socket **res);
 int sock_create_kern(int family, int type, int proto, struct socket **res);
 int sock_create_lite(int family, int type, int proto, struct socket **res);
 void sock_release(struct socket *sock);
-int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int sock_sendmsg(struct socket *sock, struct msghdr *msg);
 int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                 int flags);
 struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
index dcf6ec27739b1d8bfb98c82e8902ca0b697df5f8..41bf58a2b936bed3ec8e0d2fcda0a417662e4fbd 100644 (file)
@@ -261,7 +261,6 @@ struct header_ops {
                           unsigned short type, const void *daddr,
                           const void *saddr, unsigned int len);
        int     (*parse)(const struct sk_buff *skb, unsigned char *haddr);
-       int     (*rebuild)(struct sk_buff *skb);
        int     (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
        void    (*cache_update)(struct hh_cache *hh,
                                const struct net_device *dev,
@@ -588,6 +587,7 @@ struct netdev_queue {
 #ifdef CONFIG_BQL
        struct dql              dql;
 #endif
+       unsigned long           tx_maxrate;
 } ____cacheline_aligned_in_smp;
 
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -795,7 +795,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
  *                               struct net_device *dev);
  *     Called when a packet needs to be transmitted.
- *     Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
+ *     Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
+ *     the queue before that can happen; it's for obsolete devices and weird
+ *     corner cases, but the stack really does a non-trivial amount
+ *     of useless work if you return NETDEV_TX_BUSY.
  *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *     Required can not be NULL.
  *
@@ -1026,15 +1029,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     be otherwise expressed by feature flags. The check is called with
  *     the set of features that the stack has calculated and it returns
  *     those the driver believes to be appropriate.
- *
- * int (*ndo_switch_parent_id_get)(struct net_device *dev,
- *                                struct netdev_phys_item_id *psid);
- *     Called to get an ID of the switch chip this port is part of.
- *     If driver implements this, it indicates that it represents a port
- *     of a switch chip.
- * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
- *     Called to notify switch device port of bridge port STP
- *     state change.
+ * int (*ndo_set_tx_maxrate)(struct net_device *dev,
+ *                          int queue_index, u32 maxrate);
+ *     Called when a user wants to set a max-rate limitation of specific
+ *     TX queue.
+ * int (*ndo_get_iflink)(const struct net_device *dev);
+ *     Called to get the iflink value of this device.
  */
 struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
@@ -1172,6 +1172,8 @@ struct net_device_ops {
                                                      bool new_carrier);
        int                     (*ndo_get_phys_port_id)(struct net_device *dev,
                                                        struct netdev_phys_item_id *ppid);
+       int                     (*ndo_get_phys_port_name)(struct net_device *dev,
+                                                         char *name, size_t len);
        void                    (*ndo_add_vxlan_port)(struct  net_device *dev,
                                                      sa_family_t sa_family,
                                                      __be16 port);
@@ -1191,12 +1193,10 @@ struct net_device_ops {
        netdev_features_t       (*ndo_features_check) (struct sk_buff *skb,
                                                       struct net_device *dev,
                                                       netdev_features_t features);
-#ifdef CONFIG_NET_SWITCHDEV
-       int                     (*ndo_switch_parent_id_get)(struct net_device *dev,
-                                                           struct netdev_phys_item_id *psid);
-       int                     (*ndo_switch_port_stp_update)(struct net_device *dev,
-                                                             u8 state);
-#endif
+       int                     (*ndo_set_tx_maxrate)(struct net_device *dev,
+                                                     int queue_index,
+                                                     u32 maxrate);
+       int                     (*ndo_get_iflink)(const struct net_device *dev);
 };
 
 /**
@@ -1348,8 +1348,7 @@ enum netdev_priv_flags {
  *     @netdev_ops:    Includes several pointers to callbacks,
  *                     if one wants to override the ndo_*() functions
  *     @ethtool_ops:   Management operations
- *     @fwd_ops:       Management operations
- *     @header_ops:    Includes callbacks for creating,parsing,rebuilding,etc
+ *     @header_ops:    Includes callbacks for creating,parsing,caching,etc
  *                     of Layer 2 headers.
  *
  *     @flags:         Interface flags (a la BSD)
@@ -1542,7 +1541,7 @@ struct net_device {
        netdev_features_t       mpls_features;
 
        int                     ifindex;
-       int                     iflink;
+       int                     group;
 
        struct net_device_stats stats;
 
@@ -1557,7 +1556,9 @@ struct net_device {
 #endif
        const struct net_device_ops *netdev_ops;
        const struct ethtool_ops *ethtool_ops;
-       const struct forwarding_accel_ops *fwd_ops;
+#ifdef CONFIG_NET_SWITCHDEV
+       const struct swdev_ops *swdev_ops;
+#endif
 
        const struct header_ops *header_ops;
 
@@ -1702,9 +1703,7 @@ struct net_device {
        struct netpoll_info __rcu       *npinfo;
 #endif
 
-#ifdef CONFIG_NET_NS
-       struct net              *nd_net;
-#endif
+       possible_net_t                  nd_net;
 
        /* mid-layer private */
        union {
@@ -1745,7 +1744,6 @@ struct net_device {
 #endif
        struct phy_device *phydev;
        struct lock_class_key *qdisc_tx_busylock;
-       int group;
        struct pm_qos_request   pm_qos_req;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -1844,10 +1842,7 @@ struct net *dev_net(const struct net_device *dev)
 static inline
 void dev_net_set(struct net_device *dev, struct net *net)
 {
-#ifdef CONFIG_NET_NS
-       release_net(dev->nd_net);
-       dev->nd_net = hold_net(net);
-#endif
+       write_pnet(&dev->nd_net, net);
 }
 
 static inline bool netdev_uses_dsa(struct net_device *dev)
@@ -2159,6 +2154,7 @@ void __dev_remove_pack(struct packet_type *pt);
 void dev_add_offload(struct packet_offload *po);
 void dev_remove_offload(struct packet_offload *po);
 
+int dev_get_iflink(const struct net_device *dev);
 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
                                      unsigned short mask);
 struct net_device *dev_get_by_name(struct net *net, const char *name);
@@ -2167,6 +2163,7 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name);
 int dev_alloc_name(struct net_device *dev, const char *name);
 int dev_open(struct net_device *dev);
 int dev_close(struct net_device *dev);
+int dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct sk_buff *newskb);
 int dev_queue_xmit(struct sk_buff *skb);
@@ -2185,6 +2182,12 @@ void netdev_freemem(struct net_device *dev);
 void synchronize_net(void);
 int init_dummy_netdev(struct net_device *dev);
 
+DECLARE_PER_CPU(int, xmit_recursion);
+static inline int dev_recursion_level(void)
+{
+       return this_cpu_read(xmit_recursion);
+}
+
 struct net_device *dev_get_by_index(struct net *net, int ifindex);
 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -2403,15 +2406,6 @@ static inline int dev_parse_header(const struct sk_buff *skb,
        return dev->header_ops->parse(skb, haddr);
 }
 
-static inline int dev_rebuild_header(struct sk_buff *skb)
-{
-       const struct net_device *dev = skb->dev;
-
-       if (!dev->header_ops || !dev->header_ops->rebuild)
-               return 0;
-       return dev->header_ops->rebuild(skb);
-}
-
 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 static inline int unregister_gifconf(unsigned int family)
@@ -2969,6 +2963,8 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *);
 int dev_change_carrier(struct net_device *, bool new_carrier);
 int dev_get_phys_port_id(struct net_device *dev,
                         struct netdev_phys_item_id *ppid);
+int dev_get_phys_port_name(struct net_device *dev,
+                          char *name, size_t len);
 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                    struct netdev_queue *txq, int *ret);
@@ -3673,6 +3669,9 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
+netdev_features_t passthru_features_check(struct sk_buff *skb,
+                                         struct net_device *dev,
+                                         netdev_features_t features);
 netdev_features_t netif_skb_features(struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
index 2517ece988209a611b324a0bb8ade2b566eeb645..c480c43ad8f7a59538c3bc71fd971744e6f0246c 100644 (file)
@@ -44,11 +44,19 @@ int netfilter_init(void);
 struct sk_buff;
 
 struct nf_hook_ops;
+
+struct nf_hook_state {
+       unsigned int hook;
+       int thresh;
+       u_int8_t pf;
+       struct net_device *in;
+       struct net_device *out;
+       int (*okfn)(struct sk_buff *);
+};
+
 typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
                               struct sk_buff *skb,
-                              const struct net_device *in,
-                              const struct net_device *out,
-                              int (*okfn)(struct sk_buff *));
+                              const struct nf_hook_state *state);
 
 struct nf_hook_ops {
        struct list_head list;
@@ -118,9 +126,7 @@ static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
 }
 #endif
 
-int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
-                struct net_device *indev, struct net_device *outdev,
-                int (*okfn)(struct sk_buff *), int thresh);
+int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
 
 /**
  *     nf_hook_thresh - call a netfilter hook
@@ -135,8 +141,18 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
                                 struct net_device *outdev,
                                 int (*okfn)(struct sk_buff *), int thresh)
 {
-       if (nf_hooks_active(pf, hook))
-               return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+       if (nf_hooks_active(pf, hook)) {
+               struct nf_hook_state state = {
+                       .hook = hook,
+                       .thresh = thresh,
+                       .pf = pf,
+                       .in = indev,
+                       .out = outdev,
+                       .okfn = okfn
+               };
+
+               return nf_hook_slow(skb, &state);
+       }
        return 1;
 }
 
index f1606fa6132d562b03ba11757393e1aeb4aa6e06..34b172301558e64ef80a19d9731ea41e3a5291a6 100644 (file)
@@ -483,7 +483,7 @@ static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
 
        if (!__nested)
                return -EMSGSIZE;
-       ret = nla_put_net32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
+       ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
        if (!ret)
                ipset_nest_end(skb, __nested);
        return ret;
@@ -497,8 +497,7 @@ static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
 
        if (!__nested)
                return -EMSGSIZE;
-       ret = nla_put(skb, IPSET_ATTR_IPADDR_IPV6,
-                     sizeof(struct in6_addr), ipaddrptr);
+       ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr);
        if (!ret)
                ipset_nest_end(skb, __nested);
        return ret;
index cfb7191e6efa55633dfa8eb52855e6248ccb3d4f..c22a7fb8d0df08155857d8ca01ce4c67bfa01988 100644 (file)
@@ -54,8 +54,7 @@ extern struct xt_table *arpt_register_table(struct net *net,
 extern void arpt_unregister_table(struct xt_table *table);
 extern unsigned int arpt_do_table(struct sk_buff *skb,
                                  unsigned int hook,
-                                 const struct net_device *in,
-                                 const struct net_device *out,
+                                 const struct nf_hook_state *state,
                                  struct xt_table *table);
 
 #ifdef CONFIG_COMPAT
index c755e4971fa3cc89e3000b669e6f3ba557f555b8..2734977199cac6a587126202dff59827724af277 100644 (file)
@@ -19,61 +19,10 @@ enum nf_br_hook_priorities {
 
 #define BRNF_PKT_TYPE                  0x01
 #define BRNF_BRIDGED_DNAT              0x02
-#define BRNF_BRIDGED                   0x04
 #define BRNF_NF_BRIDGE_PREROUTING      0x08
 #define BRNF_8021Q                     0x10
 #define BRNF_PPPoE                     0x20
 
-static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
-{
-       switch (skb->protocol) {
-       case __cpu_to_be16(ETH_P_8021Q):
-               return VLAN_HLEN;
-       case __cpu_to_be16(ETH_P_PPP_SES):
-               return PPPOE_SES_HLEN;
-       default:
-               return 0;
-       }
-}
-
-static inline void nf_bridge_update_protocol(struct sk_buff *skb)
-{
-       if (skb->nf_bridge->mask & BRNF_8021Q)
-               skb->protocol = htons(ETH_P_8021Q);
-       else if (skb->nf_bridge->mask & BRNF_PPPoE)
-               skb->protocol = htons(ETH_P_PPP_SES);
-}
-
-/* Fill in the header for fragmented IP packets handled by
- * the IPv4 connection tracking code.
- *
- * Only used in br_forward.c
- */
-static inline int nf_bridge_copy_header(struct sk_buff *skb)
-{
-       int err;
-       unsigned int header_size;
-
-       nf_bridge_update_protocol(skb);
-       header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
-       err = skb_cow_head(skb, header_size);
-       if (err)
-               return err;
-
-       skb_copy_to_linear_data_offset(skb, -header_size,
-                                      skb->nf_bridge->data, header_size);
-       __skb_push(skb, nf_bridge_encap_header_len(skb));
-       return 0;
-}
-
-static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
-{
-       if (skb->nf_bridge &&
-           skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
-               return nf_bridge_copy_header(skb);
-       return 0;
-}
-
 static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 {
        if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
@@ -82,33 +31,6 @@ static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 }
 
 int br_handle_frame_finish(struct sk_buff *skb);
-/* Only used in br_device.c */
-static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
-{
-       struct nf_bridge_info *nf_bridge = skb->nf_bridge;
-
-       skb_pull(skb, ETH_HLEN);
-       nf_bridge->mask ^= BRNF_BRIDGED_DNAT;
-       skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
-                                      skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
-       skb->dev = nf_bridge->physindev;
-       return br_handle_frame_finish(skb);
-}
-
-/* This is called by the IP fragmenting code and it ensures there is
- * enough room for the encapsulating header (if there is one). */
-static inline unsigned int nf_bridge_pad(const struct sk_buff *skb)
-{
-       if (skb->nf_bridge)
-               return nf_bridge_encap_header_len(skb);
-       return 0;
-}
-
-struct bridge_skb_cb {
-       union {
-               __be32 ipv4;
-       } daddr;
-};
 
 static inline void br_drop_fake_rtable(struct sk_buff *skb)
 {
@@ -119,8 +41,6 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
 }
 
 #else
-#define nf_bridge_maybe_copy_header(skb)       (0)
-#define nf_bridge_pad(skb)                     (0)
 #define br_drop_fake_rtable(skb)               do { } while (0)
 #endif /* CONFIG_BRIDGE_NETFILTER */
 
index 901e84db847d35e5ccbe49d2d524c921dc9bdef9..4073510da485fbad83ba5236d7ba5794b599653a 100644 (file)
@@ -65,8 +65,7 @@ struct ipt_error {
 extern void *ipt_alloc_initial_table(const struct xt_table *);
 extern unsigned int ipt_do_table(struct sk_buff *skb,
                                 unsigned int hook,
-                                const struct net_device *in,
-                                const struct net_device *out,
+                                const struct nf_hook_state *state,
                                 struct xt_table *table);
 
 #ifdef CONFIG_COMPAT
index 610208b18c05819dc4cfecc0554574269157f90f..b40d2b635778372f46870d00fd06a10266359f67 100644 (file)
@@ -31,8 +31,7 @@ extern struct xt_table *ip6t_register_table(struct net *net,
 extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
 extern unsigned int ip6t_do_table(struct sk_buff *skb,
                                  unsigned int hook,
-                                 const struct net_device *in,
-                                 const struct net_device *out,
+                                 const struct nf_hook_state *state,
                                  struct xt_table *table);
 
 /* Check for an extension */
index b01ccf371fdcaf229f07bddbd2f12f9bbb5c53b0..3d1b0d2fe55e8d2725759b6a23913c3266713d39 100644 (file)
@@ -447,7 +447,7 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
 /*
  * linux/fs/nfs/direct.c
  */
-extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t);
 extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
                        struct iov_iter *iter,
                        loff_t pos);
index d449018d07265200f4d8d6eeaf8ddac1a9010ebd..8f2237eb348574cfeb56eb7af844be63e1e3184e 100644 (file)
@@ -24,6 +24,7 @@ struct phy_device *of_phy_attach(struct net_device *dev,
                                 phy_interface_t iface);
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
+extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
 
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
@@ -60,6 +61,12 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
 {
        return NULL;
 }
+
+static inline int of_mdio_parse_addr(struct device *dev,
+                                    const struct device_node *np)
+{
+       return -ENOSYS;
+}
 #endif /* CONFIG_OF */
 
 #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
index 34597c8c1a4cad4942c6c575cccc3190f01ad274..9cd72aab76fe27af17105ba93b17584c773c842f 100644 (file)
@@ -9,8 +9,11 @@
 
 #ifdef CONFIG_OF_NET
 #include <linux/of.h>
+
+struct net_device;
 extern int of_get_phy_mode(struct device_node *np);
 extern const void *of_get_mac_address(struct device_node *np);
+extern struct net_device *of_find_net_device_by_node(struct device_node *np);
 #else
 static inline int of_get_phy_mode(struct device_node *np)
 {
@@ -21,6 +24,11 @@ static inline const void *of_get_mac_address(struct device_node *np)
 {
        return NULL;
 }
+
+static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
+{
+       return NULL;
+}
 #endif
 
 #endif /* __LINUX_OF_NET_H */
index 7e75bfe37cc7cd72d5db63cd094b5110984b066b..fe5732d53edacd17af2e259e191f60e595721345 100644 (file)
@@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr);
 extern int fixed_phy_set_link_update(struct phy_device *phydev,
                        int (*link_update)(struct net_device *,
                                           struct fixed_phy_status *));
+extern int fixed_phy_update_state(struct phy_device *phydev,
+                          const struct fixed_phy_status *status,
+                          const struct fixed_phy_status *changed);
 #else
 static inline int fixed_phy_add(unsigned int irq, int phy_id,
                                struct fixed_phy_status *status)
@@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
 {
        return -ENODEV;
 }
+static inline int fixed_phy_update_state(struct phy_device *phydev,
+                          const struct fixed_phy_status *status,
+                          const struct fixed_phy_status *changed)
+{
+       return -ENODEV;
+}
 #endif /* CONFIG_FIXED_PHY */
 
 #endif /* __PHY_FIXED_H */
index 0d8ff3fb84baf1778fccd81cf4caa7c4a87ac2d8..b8b73066d1379087f1ecd5bd40dbf719ad2af4ad 100644 (file)
@@ -64,11 +64,11 @@ struct ptp_clock_request {
  * @adjtime:  Shifts the time of the hardware clock.
  *            parameter delta: Desired change in nanoseconds.
  *
- * @gettime:  Reads the current time from the hardware clock.
- *            parameter ts: Holds the result.
+ * @gettime64:  Reads the current time from the hardware clock.
+ *              parameter ts: Holds the result.
  *
- * @settime:  Set the current time on the hardware clock.
- *            parameter ts: Time value to set.
+ * @settime64:  Set the current time on the hardware clock.
+ *              parameter ts: Time value to set.
  *
  * @enable:   Request driver to enable or disable an ancillary feature.
  *            parameter request: Desired resource to enable or disable.
@@ -104,8 +104,8 @@ struct ptp_clock_info {
        struct ptp_pin_desc *pin_config;
        int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
        int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
-       int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts);
-       int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts);
+       int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+       int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
        int (*enable)(struct ptp_clock_info *ptp,
                      struct ptp_clock_request *request, int on);
        int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
index d438eeb08bff407043b32d5f52f58d08fac8838f..e23d242d1230ff899f37478bcb3a8b92769129d2 100644 (file)
@@ -1,14 +1,13 @@
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
- * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  *
- * Based on the following paper by Josh Triplett, Paul E. McKenney
- * and Jonathan Walpole:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
  * Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #define _LINUX_RHASHTABLE_H
 
 #include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/jhash.h>
 #include <linux/list_nulls.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/rcupdate.h>
 
 /*
  * The end of the chain is marked with a special nulls marks which has
@@ -42,6 +44,9 @@
 #define RHT_HASH_BITS          27
 #define RHT_BASE_SHIFT         RHT_HASH_BITS
 
+/* Base bits plus 1 bit for nulls marker */
+#define RHT_HASH_RESERVED_SPACE        (RHT_BASE_BITS + 1)
+
 struct rhash_head {
        struct rhash_head __rcu         *next;
 };
@@ -49,20 +54,43 @@ struct rhash_head {
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
+ * @rehash: Current bucket being rehashed
+ * @hash_rnd: Random seed to fold into hash
  * @locks_mask: Mask to apply before accessing locks[]
  * @locks: Array of spinlocks protecting individual buckets
+ * @walkers: List of active walkers
+ * @rcu: RCU structure for freeing the table
+ * @future_tbl: Table under construction during rehashing
  * @buckets: size * hash buckets
  */
 struct bucket_table {
-       size_t                  size;
+       unsigned int            size;
+       unsigned int            rehash;
+       u32                     hash_rnd;
        unsigned int            locks_mask;
        spinlock_t              *locks;
+       struct list_head        walkers;
+       struct rcu_head         rcu;
+
+       struct bucket_table __rcu *future_tbl;
 
        struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+       struct rhashtable *ht;
+       const void *key;
+};
+
 typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+                              const void *obj);
 
 struct rhashtable;
 
@@ -72,60 +100,62 @@ struct rhashtable;
  * @key_len: Length of key
  * @key_offset: Offset of key in struct to be hashed
  * @head_offset: Offset of rhash_head in struct to be hashed
- * @hash_rnd: Seed to use while hashing
- * @max_shift: Maximum number of shifts while expanding
- * @min_shift: Minimum number of shifts while shrinking
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
  * @nulls_base: Base value to generate nulls marker
+ * @insecure_elasticity: Set to true to disable chain length checks
+ * @automatic_shrinking: Enable automatic shrinking of tables
  * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
- * @hashfn: Function to hash key
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
  * @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
  */
 struct rhashtable_params {
        size_t                  nelem_hint;
        size_t                  key_len;
        size_t                  key_offset;
        size_t                  head_offset;
-       u32                     hash_rnd;
-       size_t                  max_shift;
-       size_t                  min_shift;
+       unsigned int            max_size;
+       unsigned int            min_size;
        u32                     nulls_base;
+       bool                    insecure_elasticity;
+       bool                    automatic_shrinking;
        size_t                  locks_mul;
        rht_hashfn_t            hashfn;
        rht_obj_hashfn_t        obj_hashfn;
+       rht_obj_cmpfn_t         obj_cmpfn;
 };
 
 /**
  * struct rhashtable - Hash table handle
  * @tbl: Bucket table
- * @future_tbl: Table under construction during expansion/shrinking
  * @nelems: Number of elements in table
- * @shift: Current size (1 << shift)
+ * @key_len: Key length for hashfn
+ * @elasticity: Maximum chain length before rehash
  * @p: Configuration parameters
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
- * @walkers: List of active walkers
- * @being_destroyed: True if table is set up for destruction
+ * @lock: Spin lock to protect walker list
  */
 struct rhashtable {
        struct bucket_table __rcu       *tbl;
-       struct bucket_table __rcu       *future_tbl;
        atomic_t                        nelems;
-       atomic_t                        shift;
+       unsigned int                    key_len;
+       unsigned int                    elasticity;
        struct rhashtable_params        p;
        struct work_struct              run_work;
        struct mutex                    mutex;
-       struct list_head                walkers;
-       bool                            being_destroyed;
+       spinlock_t                      lock;
 };
 
 /**
  * struct rhashtable_walker - Hash table walker
  * @list: List entry on list of walkers
- * @resize: Resize event occured
+ * @tbl: The table that we were walking over
  */
 struct rhashtable_walker {
        struct list_head list;
-       bool resize;
+       struct bucket_table *tbl;
 };
 
 /**
@@ -162,6 +192,118 @@ static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
        return ((unsigned long) ptr) >> 1;
 }
 
+static inline void *rht_obj(const struct rhashtable *ht,
+                           const struct rhash_head *he)
+{
+       return (char *)he - ht->p.head_offset;
+}
+
+static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
+                                           unsigned int hash)
+{
+       return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+}
+
+static inline unsigned int rht_key_hashfn(
+       struct rhashtable *ht, const struct bucket_table *tbl,
+       const void *key, const struct rhashtable_params params)
+{
+       unsigned int hash;
+
+       /* params must be equal to ht->p if it isn't constant. */
+       if (!__builtin_constant_p(params.key_len))
+               hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+       else if (params.key_len) {
+               unsigned int key_len = params.key_len;
+
+               if (params.hashfn)
+                       hash = params.hashfn(key, key_len, tbl->hash_rnd);
+               else if (key_len & (sizeof(u32) - 1))
+                       hash = jhash(key, key_len, tbl->hash_rnd);
+               else
+                       hash = jhash2(key, key_len / sizeof(u32),
+                                     tbl->hash_rnd);
+       } else {
+               unsigned int key_len = ht->p.key_len;
+
+               if (params.hashfn)
+                       hash = params.hashfn(key, key_len, tbl->hash_rnd);
+               else
+                       hash = jhash(key, key_len, tbl->hash_rnd);
+       }
+
+       return rht_bucket_index(tbl, hash);
+}
+
+static inline unsigned int rht_head_hashfn(
+       struct rhashtable *ht, const struct bucket_table *tbl,
+       const struct rhash_head *he, const struct rhashtable_params params)
+{
+       const char *ptr = rht_obj(ht, he);
+
+       return likely(params.obj_hashfn) ?
+              rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
+                                                           ht->p.key_len,
+                                                      tbl->hash_rnd)) :
+              rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
+}
+
+/**
+ * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
+ * @ht:                hash table
+ * @tbl:       current table
+ */
+static inline bool rht_grow_above_75(const struct rhashtable *ht,
+                                    const struct bucket_table *tbl)
+{
+       /* Expand table when exceeding 75% load */
+       return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
+              (!ht->p.max_size || tbl->size < ht->p.max_size);
+}
+
+/**
+ * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
+ * @ht:                hash table
+ * @tbl:       current table
+ */
+static inline bool rht_shrink_below_30(const struct rhashtable *ht,
+                                      const struct bucket_table *tbl)
+{
+       /* Shrink table beneath 30% load */
+       return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
+              tbl->size > ht->p.min_size;
+}
+
+/**
+ * rht_grow_above_100 - returns true if nelems > table-size
+ * @ht:                hash table
+ * @tbl:       current table
+ */
+static inline bool rht_grow_above_100(const struct rhashtable *ht,
+                                     const struct bucket_table *tbl)
+{
+       return atomic_read(&ht->nelems) > tbl->size;
+}
+
+/* The bucket lock is selected based on the hash and protects mutations
+ * on a group of hash buckets.
+ *
+ * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
+ * a single lock always covers both buckets which may both contains
+ * entries which link to the same bucket of the old table during resizing.
+ * This allows to simplify the locking as locking the bucket in both
+ * tables during resize always guarantee protection.
+ *
+ * IMPORTANT: When holding the bucket lock of both the old and new table
+ * during expansions and shrinking, the old bucket lock must always be
+ * acquired first.
+ */
+static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
+                                         unsigned int hash)
+{
+       return &tbl->locks[hash & tbl->locks_mask];
+}
+
 #ifdef CONFIG_PROVE_LOCKING
 int lockdep_rht_mutex_is_held(struct rhashtable *ht);
 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
@@ -178,23 +320,13 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
 }
 #endif /* CONFIG_PROVE_LOCKING */
 
-int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
-
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
-
-int rhashtable_expand(struct rhashtable *ht);
-int rhashtable_shrink(struct rhashtable *ht);
+int rhashtable_init(struct rhashtable *ht,
+                   const struct rhashtable_params *params);
 
-void *rhashtable_lookup(struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
-                               bool (*compare)(void *, void *), void *arg);
-
-bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
-bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
-                                     struct rhash_head *obj,
-                                     bool (*compare)(void *, void *),
-                                     void *arg);
+int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+                          struct rhash_head *obj,
+                          struct bucket_table *old_tbl);
+int rhashtable_insert_rehash(struct rhashtable *ht);
 
 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
@@ -202,6 +334,9 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
 void *rhashtable_walk_next(struct rhashtable_iter *iter);
 void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
 
+void rhashtable_free_and_destroy(struct rhashtable *ht,
+                                void (*free_fn)(void *ptr, void *arg),
+                                void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
 #define rht_dereference(p, ht) \
@@ -352,4 +487,316 @@ void rhashtable_destroy(struct rhashtable *ht);
        rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
                                        tbl, hash, member)
 
+static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
+                                    const void *obj)
+{
+       struct rhashtable *ht = arg->ht;
+       const char *ptr = obj;
+
+       return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, inlined version
+ * @ht:                hash table
+ * @key:       the pointer to the key
+ * @params:    hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for a entry with an identical key. The first matching entry is returned.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+       struct rhashtable *ht, const void *key,
+       const struct rhashtable_params params)
+{
+       struct rhashtable_compare_arg arg = {
+               .ht = ht,
+               .key = key,
+       };
+       const struct bucket_table *tbl;
+       struct rhash_head *he;
+       unsigned int hash;
+
+       rcu_read_lock();
+
+       tbl = rht_dereference_rcu(ht->tbl, ht);
+restart:
+       hash = rht_key_hashfn(ht, tbl, key, params);
+       rht_for_each_rcu(he, tbl, hash) {
+               if (params.obj_cmpfn ?
+                   params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+                   rhashtable_compare(&arg, rht_obj(ht, he)))
+                       continue;
+               rcu_read_unlock();
+               return rht_obj(ht, he);
+       }
+
+       /* Ensure we see any new tables. */
+       smp_rmb();
+
+       tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+       if (unlikely(tbl))
+               goto restart;
+       rcu_read_unlock();
+
+       return NULL;
+}
+
+/* Internal function, please use rhashtable_insert_fast() instead */
+static inline int __rhashtable_insert_fast(
+       struct rhashtable *ht, const void *key, struct rhash_head *obj,
+       const struct rhashtable_params params)
+{
+       struct rhashtable_compare_arg arg = {
+               .ht = ht,
+               .key = key,
+       };
+       struct bucket_table *tbl, *new_tbl;
+       struct rhash_head *head;
+       spinlock_t *lock;
+       unsigned int elasticity;
+       unsigned int hash;
+       int err;
+
+restart:
+       rcu_read_lock();
+
+       tbl = rht_dereference_rcu(ht->tbl, ht);
+
+       /* All insertions must grab the oldest table containing
+        * the hashed bucket that is yet to be rehashed.
+        */
+       for (;;) {
+               hash = rht_head_hashfn(ht, tbl, obj, params);
+               lock = rht_bucket_lock(tbl, hash);
+               spin_lock_bh(lock);
+
+               if (tbl->rehash <= hash)
+                       break;
+
+               spin_unlock_bh(lock);
+               tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+       }
+
+       new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+       if (unlikely(new_tbl)) {
+               err = rhashtable_insert_slow(ht, key, obj, new_tbl);
+               if (err == -EAGAIN)
+                       goto slow_path;
+               goto out;
+       }
+
+       if (unlikely(rht_grow_above_100(ht, tbl))) {
+slow_path:
+               spin_unlock_bh(lock);
+               err = rhashtable_insert_rehash(ht);
+               rcu_read_unlock();
+               if (err)
+                       return err;
+
+               goto restart;
+       }
+
+       err = -EEXIST;
+       elasticity = ht->elasticity;
+       rht_for_each(head, tbl, hash) {
+               if (key &&
+                   unlikely(!(params.obj_cmpfn ?
+                              params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+                              rhashtable_compare(&arg, rht_obj(ht, head)))))
+                       goto out;
+               if (!--elasticity)
+                       goto slow_path;
+       }
+
+       err = 0;
+
+       head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+
+       RCU_INIT_POINTER(obj->next, head);
+
+       rcu_assign_pointer(tbl->buckets[hash], obj);
+
+       atomic_inc(&ht->nelems);
+       if (rht_grow_above_75(ht, tbl))
+               schedule_work(&ht->run_work);
+
+out:
+       spin_unlock_bh(lock);
+       rcu_read_unlock();
+
+       return err;
+}
+
+/**
+ * rhashtable_insert_fast - insert object into hash table
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ * @params:    hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhashtable_insert_fast(
+       struct rhashtable *ht, struct rhash_head *obj,
+       const struct rhashtable_params params)
+{
+       return __rhashtable_insert_fast(ht, NULL, obj, params);
+}
+
+/**
+ * rhashtable_lookup_insert_fast - lookup and insert object into hash table
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ * @params:    hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed key hash table (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhashtable_lookup_insert_fast(
+       struct rhashtable *ht, struct rhash_head *obj,
+       const struct rhashtable_params params)
+{
+       const char *key = rht_obj(ht, obj);
+
+       BUG_ON(ht->p.obj_hashfn);
+
+       return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
+                                       params);
+}
+
+/**
+ * rhashtable_lookup_insert_key - search and insert object to hash table
+ *                               with explicit key
+ * @ht:                hash table
+ * @key:       key
+ * @obj:       pointer to hash head inside object
+ * @params:    hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * Lookups may occur in parallel with hashtable mutations and resizing.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ *
+ * Returns zero on success.
+ */
+static inline int rhashtable_lookup_insert_key(
+       struct rhashtable *ht, const void *key, struct rhash_head *obj,
+       const struct rhashtable_params params)
+{
+       BUG_ON(!ht->p.obj_hashfn || !key);
+
+       return __rhashtable_insert_fast(ht, key, obj, params);
+}
+
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
+       struct rhashtable *ht, struct bucket_table *tbl,
+       struct rhash_head *obj, const struct rhashtable_params params)
+{
+       struct rhash_head __rcu **pprev;
+       struct rhash_head *he;
+       spinlock_t * lock;
+       unsigned int hash;
+       int err = -ENOENT;
+
+       hash = rht_head_hashfn(ht, tbl, obj, params);
+       lock = rht_bucket_lock(tbl, hash);
+
+       spin_lock_bh(lock);
+
+       pprev = &tbl->buckets[hash];
+       rht_for_each(he, tbl, hash) {
+               if (he != obj) {
+                       pprev = &he->next;
+                       continue;
+               }
+
+               rcu_assign_pointer(*pprev, obj->next);
+               err = 0;
+               break;
+       }
+
+       spin_unlock_bh(lock);
+
+       return err;
+}
+
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ * @params:    hash table parameters
+ *
+ * Since the hash chain is single linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerable slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+       struct rhashtable *ht, struct rhash_head *obj,
+       const struct rhashtable_params params)
+{
+       struct bucket_table *tbl;
+       int err;
+
+       rcu_read_lock();
+
+       tbl = rht_dereference_rcu(ht->tbl, ht);
+
+       /* Because we have already taken (and released) the bucket
+        * lock in old_tbl, if we find that future_tbl is not yet
+        * visible then that guarantees the entry to still be in
+        * the old tbl if it exists.
+        */
+       while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+              (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
+               ;
+
+       if (err)
+               goto out;
+
+       atomic_dec(&ht->nelems);
+       if (unlikely(ht->p.automatic_shrinking &&
+                    rht_shrink_below_30(ht, tbl)))
+               schedule_work(&ht->run_work);
+
+out:
+       rcu_read_unlock();
+
+       return err;
+}
+
 #endif /* _LINUX_RHASHTABLE_H */
index a1b7dbd127ffc73c1c07f3935ed9cd9118cfec52..18264ea9e314153488f9726b530993658c4cea25 100644 (file)
@@ -1556,7 +1556,7 @@ struct security_operations {
        int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
        int (*inode_permission) (struct inode *inode, int mask);
        int (*inode_setattr)    (struct dentry *dentry, struct iattr *attr);
-       int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
+       int (*inode_getattr) (const struct path *path);
        int (*inode_setxattr) (struct dentry *dentry, const char *name,
                               const void *value, size_t size, int flags);
        void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
@@ -1716,7 +1716,6 @@ struct security_operations {
        int (*tun_dev_attach_queue) (void *security);
        int (*tun_dev_attach) (struct sock *sk, void *security);
        int (*tun_dev_open) (void *security);
-       void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);
 #endif /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1843,7 +1842,7 @@ int security_inode_readlink(struct dentry *dentry);
 int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
 int security_inode_permission(struct inode *inode, int mask);
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
-int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
+int security_inode_getattr(const struct path *path);
 int security_inode_setxattr(struct dentry *dentry, const char *name,
                            const void *value, size_t size, int flags);
 void security_inode_post_setxattr(struct dentry *dentry, const char *name,
@@ -2259,8 +2258,7 @@ static inline int security_inode_setattr(struct dentry *dentry,
        return 0;
 }
 
-static inline int security_inode_getattr(struct vfsmount *mnt,
-                                         struct dentry *dentry)
+static inline int security_inode_getattr(const struct path *path)
 {
        return 0;
 }
@@ -2735,8 +2733,6 @@ int security_tun_dev_attach_queue(void *security);
 int security_tun_dev_attach(struct sock *sk, void *security);
 int security_tun_dev_open(void *security);
 
-void security_skb_owned_by(struct sk_buff *skb, struct sock *sk);
-
 #else  /* CONFIG_SECURITY_NETWORK */
 static inline int security_unix_stream_connect(struct sock *sock,
                                               struct sock *other,
@@ -2928,11 +2924,6 @@ static inline int security_tun_dev_open(void *security)
 {
        return 0;
 }
-
-static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
-}
-
 #endif /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
index f54d6659713ae76e96391334dba700eac8be122f..36f3f43c011789efe68c372296cc691cc7a3863e 100644 (file)
@@ -492,7 +492,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
   *    @napi_id: id of the NAPI struct this skb came from
  *     @secmark: security marking
  *     @mark: Generic packet mark
- *     @dropcount: total number of sk_receive_queue overflows
  *     @vlan_proto: vlan encapsulation protocol
  *     @vlan_tci: vlan tag control information
  *     @inner_protocol: Protocol (encapsulation)
@@ -641,7 +640,6 @@ struct sk_buff {
 #endif
        union {
                __u32           mark;
-               __u32           dropcount;
                __u32           reserved_tailroom;
        };
 
@@ -870,8 +868,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 void skb_abort_seq_read(struct skb_seq_state *st);
 
 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
-                          unsigned int to, struct ts_config *config,
-                          struct ts_state *state);
+                          unsigned int to, struct ts_config *config);
 
 /*
  * Packet hash types specify the type of hash in skb_set_hash.
index 46cca4c06848346ca84753ac182526a4514ff277..083ac388098e43413b4eee3238558c180b747847 100644 (file)
@@ -19,8 +19,8 @@ void sock_diag_unregister(const struct sock_diag_handler *h);
 void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 
-int sock_diag_check_cookie(void *sk, __u32 *cookie);
-void sock_diag_save_cookie(void *sk, __u32 *cookie);
+int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
+void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
 
 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
 int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
index 5c19cba34dce023a49c45d776751190834b88889..5bf59c8493b763c6dc11ccce18da9c0ac6c0da46 100644 (file)
@@ -51,6 +51,7 @@ struct msghdr {
        void            *msg_control;   /* ancillary data */
        __kernel_size_t msg_controllen; /* ancillary data buffer length */
        unsigned int    msg_flags;      /* flags on received message */
+       struct kiocb    *msg_iocb;      /* ptr to iocb for async requests */
 };
  
 struct user_msghdr {
@@ -138,6 +139,11 @@ static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr
        return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
 }
 
+static inline size_t msg_data_left(struct msghdr *msg)
+{
+       return iov_iter_count(&msg->msg_iter);
+}
+
 /* "Socket"-level control message types: */
 
 #define        SCM_RIGHTS      0x01            /* rw: access rights (array of int) */
@@ -181,6 +187,7 @@ struct ucred {
 #define AF_WANPIPE     25      /* Wanpipe API Sockets */
 #define AF_LLC         26      /* Linux LLC                    */
 #define AF_IB          27      /* Native InfiniBand address    */
+#define AF_MPLS                28      /* MPLS */
 #define AF_CAN         29      /* Controller Area Network      */
 #define AF_TIPC                30      /* TIPC sockets                 */
 #define AF_BLUETOOTH   31      /* Bluetooth sockets            */
@@ -226,6 +233,7 @@ struct ucred {
 #define PF_WANPIPE     AF_WANPIPE
 #define PF_LLC         AF_LLC
 #define PF_IB          AF_IB
+#define PF_MPLS                AF_MPLS
 #define PF_CAN         AF_CAN
 #define PF_TIPC                AF_TIPC
 #define PF_BLUETOOTH   AF_BLUETOOTH
index cd519a11c2c6723d5d679ffed8cb4320eba6e318..b63fe6f5fdc874399e5125e879a071797697075d 100644 (file)
@@ -22,6 +22,7 @@ struct at86rf230_platform_data {
        int rstn;
        int slp_tr;
        int dig2;
+       u8 xtal_trim;
 };
 
 #endif
index 85b8ee67e93795ac88e02a1b7a00a0ed25b6d785..e741e8baad928caac8ec941700c5392fbdfa3164 100644 (file)
@@ -21,6 +21,7 @@ struct cc2520_platform_data {
        int sfd;
        int reset;
        int vreg;
+       bool amplified;
 };
 
 #endif
index 1a7adb411647436feac207029d8e8efe19ac1193..f869ae8afbaf9f4092625d7f5c0317ef7538ca1f 100644 (file)
@@ -111,7 +111,7 @@ struct tcp_request_sock_ops;
 struct tcp_request_sock {
        struct inet_request_sock        req;
        const struct tcp_request_sock_ops *af_specific;
-       struct sock                     *listener; /* needed for TFO */
+       bool                            tfo_listener;
        u32                             rcv_isn;
        u32                             snt_isn;
        u32                             snt_synack; /* synack sent time */
@@ -236,7 +236,6 @@ struct tcp_sock {
        u32     lost_out;       /* Lost packets                 */
        u32     sacked_out;     /* SACK'd packets                       */
        u32     fackets_out;    /* FACK'd packets                       */
-       u32     tso_deferred;
 
        /* from STCP, retrans queue hinting */
        struct sk_buff* lost_skb_hint;
index 247cfdcc4b08bbf377ff5819ebd02683806b0c83..87c094961bd5867a3a4bc7e83e6db7a38c2397cf 100644 (file)
@@ -34,7 +34,7 @@ static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb)
 
 #define UDP_HTABLE_SIZE_MIN            (CONFIG_BASE_SMALL ? 128 : 256)
 
-static inline int udp_hashfn(struct net *net, unsigned num, unsigned mask)
+static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
 {
        return (num + net_hash_mix(net)) & mask;
 }
index 71880299ed487b68dc7b278248a4fb29ddb6b6ec..8b01e1c3c6146623759f122b72ff6b3b869a0617 100644 (file)
@@ -76,6 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
@@ -110,6 +111,14 @@ static inline bool iter_is_iovec(struct iov_iter *i)
        return !(i->type & (ITER_BVEC | ITER_KVEC));
 }
 
+/*
+ * Get one of READ or WRITE out of iter->type without any other flags OR'd in
+ * with it.
+ *
+ * The ?: is just for type safety.
+ */
+#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK)
+
 /*
  * Cap the iov_iter by given limit; note that the second argument is
  * *not* the new size - it's upper limit for such.  Passing it a value
@@ -139,4 +148,18 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 
+int import_iovec(int type, const struct iovec __user * uvector,
+                unsigned nr_segs, unsigned fast_segs,
+                struct iovec **iov, struct iov_iter *i);
+
+#ifdef CONFIG_COMPAT
+struct compat_iovec;
+int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
+                unsigned nr_segs, unsigned fast_segs,
+                struct iovec **iov, struct iov_iter *i);
+#endif
+
+int import_single_range(int type, void __user *buf, size_t len,
+                struct iovec *iov, struct iov_iter *i);
+
 #endif
index 6fab66c5c5af6356318e52c92703bdb3ea37a3d4..c6b97e58cf8455c120c486192e9d38eb9834343a 100644 (file)
@@ -211,6 +211,8 @@ struct p9_dirent {
        char d_name[256];
 };
 
+struct iov_iter;
+
 int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb);
 int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid,
                     const char *name);
@@ -236,10 +238,8 @@ int p9_client_clunk(struct p9_fid *fid);
 int p9_client_fsync(struct p9_fid *fid, int datasync);
 int p9_client_remove(struct p9_fid *fid);
 int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags);
-int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
-                                                       u64 offset, u32 count);
-int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
-                                                       u64 offset, u32 count);
+int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err);
+int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
 int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
 int p9dirent_read(struct p9_client *clnt, char *buf, int len,
                  struct p9_dirent *dirent);
index 2a25dec3021166d5aba52ad155e8ca01e0b1570e..5122b5e40f78f1aec1b30aa17d13dd68558d9f4f 100644 (file)
@@ -61,7 +61,7 @@ struct p9_trans_module {
        int (*cancel) (struct p9_client *, struct p9_req_t *req);
        int (*cancelled)(struct p9_client *, struct p9_req_t *req);
        int (*zc_request)(struct p9_client *, struct p9_req_t *,
-                         char *, char *, int , int, int, int);
+                         struct iov_iter *, struct iov_iter *, int , int, int);
 };
 
 void v9fs_register_trans(struct p9_trans_module *m);
index 0d87674fb7758736d9cdbf466fd717c5665f535e..172632dd9930d1d98a68e47076c97fba3bd2c7ae 100644 (file)
@@ -100,8 +100,8 @@ struct vsock_transport {
 
        /* DGRAM. */
        int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *);
-       int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
-                            struct msghdr *msg, size_t len, int flags);
+       int (*dgram_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
+                            size_t len, int flags);
        int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
                             struct msghdr *, size_t len);
        bool (*dgram_allow)(u32 cid, u32 port);
index 73c49864076b3370f015d11616f02c47a2768ff0..5e0f891d476c299d91fde14ce9cdeedc9a7e26c6 100644 (file)
@@ -9,28 +9,17 @@
 
 extern struct neigh_table arp_tbl;
 
-static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd)
+static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 *hash_rnd)
 {
+       u32 key = *(const u32 *)pkey;
        u32 val = key ^ hash32_ptr(dev);
 
-       return val * hash_rnd;
+       return val * hash_rnd[0];
 }
 
 static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
 {
-       struct neigh_hash_table *nht = rcu_dereference_bh(arp_tbl.nht);
-       struct neighbour *n;
-       u32 hash_val;
-
-       hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
-       for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
-            n != NULL;
-            n = rcu_dereference_bh(n->next)) {
-               if (n->dev == dev && *(u32 *)n->primary_key == key)
-                       return n;
-       }
-
-       return NULL;
+       return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
 }
 
 static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
@@ -47,7 +36,6 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
 }
 
 void arp_init(void);
-int arp_find(unsigned char *haddr, struct sk_buff *skb);
 int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
 void arp_send(int type, int ptype, __be32 dest_ip,
              struct net_device *dev, __be32 src_ip,
index bf0396e9a5d3f3c946b30a64c030c9f5c4f17ca0..16a923a3a43a8825d167b75025baca0b1b5d741c 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/atomic.h>
+#include <net/neighbour.h>
 
 #define        AX25_T1CLAMPLO                  1
 #define        AX25_T1CLAMPHI                  (30 * HZ)
@@ -366,9 +367,7 @@ int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *,
                  struct net_device *);
 
 /* ax25_ip.c */
-int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short,
-                    const void *, const void *, unsigned int);
-int ax25_rebuild_header(struct sk_buff *);
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
 extern const struct header_ops ax25_header_ops;
 
 /* ax25_out.c */
index e00455aab18c2cd7274615eecc4248efc22cd4a8..7dba80546f16a00892c5cf8e5e3b7ca1eb292e95 100644 (file)
@@ -245,10 +245,10 @@ int  bt_sock_register(int proto, const struct net_proto_family *ops);
 void bt_sock_unregister(int proto);
 void bt_sock_link(struct bt_sock_list *l, struct sock *s);
 void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
-int  bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-                               struct msghdr *msg, size_t len, int flags);
-int  bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *msg, size_t len, int flags);
+int  bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                    int flags);
+int  bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+                           size_t len, int flags);
 uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
 int  bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
@@ -269,29 +269,34 @@ struct l2cap_ctrl {
        __u16   reqseq;
        __u16   txseq;
        __u8    retries;
+       __le16  psm;
+       bdaddr_t bdaddr;
+       struct l2cap_chan *chan;
 };
 
 struct hci_dev;
 
 typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
-
-struct hci_req_ctrl {
-       bool                    start;
-       u8                      event;
-       hci_req_complete_t      complete;
+typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
+                                      u16 opcode, struct sk_buff *skb);
+
+struct req_ctrl {
+       bool start;
+       u8 event;
+       hci_req_complete_t complete;
+       hci_req_complete_skb_t complete_skb;
 };
 
 struct bt_skb_cb {
        __u8 pkt_type;
-       __u8 incoming;
+       __u8 force_active;
        __u16 opcode;
        __u16 expect;
-       __u8 force_active;
-       struct l2cap_chan *chan;
-       struct l2cap_ctrl control;
-       struct hci_req_ctrl req;
-       bdaddr_t bdaddr;
-       __le16 psm;
+       __u8 incoming:1;
+       union {
+               struct l2cap_ctrl l2cap;
+               struct req_ctrl req;
+       };
 };
 #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
 
@@ -339,6 +344,11 @@ out:
 
 int bt_to_errno(__u16 code);
 
+void hci_sock_set_flag(struct sock *sk, int nr);
+void hci_sock_clear_flag(struct sock *sk, int nr);
+int hci_sock_test_flag(struct sock *sk, int nr);
+unsigned short hci_sock_get_channel(struct sock *sk);
+
 int hci_sock_init(void);
 void hci_sock_cleanup(void);
 
@@ -358,6 +368,9 @@ void l2cap_exit(void);
 int sco_init(void);
 void sco_exit(void);
 
+int mgmt_init(void);
+void mgmt_exit(void);
+
 void bt_sock_reclassify_lock(struct sock *sk, int proto);
 
 #endif /* __BLUETOOTH_H */
index 8e54f825153c95832246c660521d62c56d2f4117..3acecf35420b707a40ab18fe0fa3d221025aa07f 100644 (file)
@@ -160,6 +160,14 @@ enum {
         * during the hdev->setup vendor callback.
         */
        HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+
+       /* When this quirk is set, LE scan and BR/EDR inquiry is done
+        * simultaneously, otherwise it's interleaved.
+        *
+        * This quirk can be set before hci_register_dev is called or
+        * during the hdev->setup vendor callback.
+        */
+       HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
 };
 
 /* HCI device flags */
@@ -179,13 +187,14 @@ enum {
        HCI_RESET,
 };
 
-/* BR/EDR and/or LE controller flags: the flags defined here should represent
- * states configured via debugfs for debugging and testing purposes only.
- */
+/* HCI socket flags */
 enum {
-       HCI_DUT_MODE,
-       HCI_FORCE_BREDR_SMP,
-       HCI_FORCE_STATIC_ADDR,
+       HCI_SOCK_TRUSTED,
+       HCI_MGMT_INDEX_EVENTS,
+       HCI_MGMT_UNCONF_INDEX_EVENTS,
+       HCI_MGMT_EXT_INDEX_EVENTS,
+       HCI_MGMT_GENERIC_EVENTS,
+       HCI_MGMT_OOB_DATA_EVENTS,
 };
 
 /*
@@ -217,6 +226,8 @@ enum {
        HCI_HS_ENABLED,
        HCI_LE_ENABLED,
        HCI_ADVERTISING,
+       HCI_ADVERTISING_CONNECTABLE,
+       HCI_ADVERTISING_INSTANCE,
        HCI_CONNECTABLE,
        HCI_DISCOVERABLE,
        HCI_LIMITED_DISCOVERABLE,
@@ -225,13 +236,13 @@ enum {
        HCI_FAST_CONNECTABLE,
        HCI_BREDR_ENABLED,
        HCI_LE_SCAN_INTERRUPTED,
-};
 
-/* A mask for the flags that are supposed to remain when a reset happens
- * or the HCI device is closed.
- */
-#define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \
-                             BIT(HCI_FAST_CONNECTABLE) | BIT(HCI_LE_ADV))
+       HCI_DUT_MODE,
+       HCI_FORCE_BREDR_SMP,
+       HCI_FORCE_STATIC_ADDR,
+
+       __HCI_NUM_FLAGS,
+};
 
 /* HCI timeouts */
 #define HCI_DISCONN_TIMEOUT    msecs_to_jiffies(2000)  /* 2 seconds */
@@ -455,6 +466,11 @@ enum {
 #define EIR_SSP_HASH_C         0x0E /* Simple Pairing Hash C */
 #define EIR_SSP_RAND_R         0x0F /* Simple Pairing Randomizer R */
 #define EIR_DEVICE_ID          0x10 /* device ID */
+#define EIR_APPEARANCE         0x19 /* Device appearance */
+#define EIR_LE_BDADDR          0x1B /* LE Bluetooth device address */
+#define EIR_LE_ROLE            0x1C /* LE role */
+#define EIR_LE_SC_CONFIRM      0x22 /* LE SC Confirmation Value */
+#define EIR_LE_SC_RANDOM       0x23 /* LE SC Random Value */
 
 /* Low Energy Advertising Flags */
 #define LE_AD_LIMITED          0x01 /* Limited Discoverable */
index 52863c3e0b132bc59224feef3f9e7acd678ef45a..93fd3e756b8afc72d2618c35f4d8202b0abac5ce 100644 (file)
@@ -76,6 +76,7 @@ struct discovery_state {
        u8                      last_adv_data[HCI_MAX_AD_LENGTH];
        u8                      last_adv_data_len;
        bool                    report_invalid_rssi;
+       bool                    result_filtering;
        s8                      rssi;
        u16                     uuid_count;
        u8                      (*uuids)[16];
@@ -108,7 +109,7 @@ struct bt_uuid {
 struct smp_csrk {
        bdaddr_t bdaddr;
        u8 bdaddr_type;
-       u8 master;
+       u8 type;
        u8 val[16];
 };
 
@@ -154,6 +155,17 @@ struct oob_data {
        u8 rand256[16];
 };
 
+struct adv_info {
+       struct delayed_work timeout_exp;
+       __u8    instance;
+       __u32   flags;
+       __u16   timeout;
+       __u16   adv_data_len;
+       __u8    adv_data[HCI_MAX_AD_LENGTH];
+       __u16   scan_rsp_len;
+       __u8    scan_rsp_data[HCI_MAX_AD_LENGTH];
+};
+
 #define HCI_MAX_SHORT_NAME_LENGTH      10
 
 /* Default LE RPA expiry time, 15 minutes */
@@ -314,7 +326,6 @@ struct hci_dev {
        struct sk_buff_head     raw_q;
        struct sk_buff_head     cmd_q;
 
-       struct sk_buff          *recv_evt;
        struct sk_buff          *sent_cmd;
        struct sk_buff          *reassembly[NUM_REASSEMBLY];
 
@@ -322,6 +333,7 @@ struct hci_dev {
        wait_queue_head_t       req_wait_q;
        __u32                   req_status;
        __u32                   req_result;
+       struct sk_buff          *req_skb;
 
        void                    *smp_data;
        void                    *smp_bredr_data;
@@ -352,8 +364,7 @@ struct hci_dev {
 
        struct rfkill           *rfkill;
 
-       unsigned long           dbg_flags;
-       unsigned long           dev_flags;
+       DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
 
        struct delayed_work     le_scan_disable;
        struct delayed_work     le_scan_restart;
@@ -364,6 +375,8 @@ struct hci_dev {
        __u8                    scan_rsp_data[HCI_MAX_AD_LENGTH];
        __u8                    scan_rsp_data_len;
 
+       struct adv_info         adv_instance;
+
        __u8                    irk[16];
        __u32                   rpa_timeout;
        struct delayed_work     rpa_expired;
@@ -373,6 +386,7 @@ struct hci_dev {
        int (*close)(struct hci_dev *hdev);
        int (*flush)(struct hci_dev *hdev);
        int (*setup)(struct hci_dev *hdev);
+       int (*shutdown)(struct hci_dev *hdev);
        int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
        void (*notify)(struct hci_dev *hdev, unsigned int evt);
        void (*hw_error)(struct hci_dev *hdev, u8 code);
@@ -498,19 +512,29 @@ struct hci_conn_params {
 extern struct list_head hci_dev_list;
 extern struct list_head hci_cb_list;
 extern rwlock_t hci_dev_list_lock;
-extern rwlock_t hci_cb_list_lock;
+extern struct mutex hci_cb_list_lock;
+
+#define hci_dev_set_flag(hdev, nr)             set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_clear_flag(hdev, nr)           clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_change_flag(hdev, nr)          change_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_flag(hdev, nr)            test_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_set_flag(hdev, nr)    test_and_set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_clear_flag(hdev, nr)  test_and_clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
+
+#define hci_dev_clear_volatile_flags(hdev)                     \
+       do {                                                    \
+               hci_dev_clear_flag(hdev, HCI_LE_SCAN);          \
+               hci_dev_clear_flag(hdev, HCI_LE_ADV);           \
+               hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);     \
+       } while (0)
 
 /* ----- HCI interface to upper protocols ----- */
 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
 int l2cap_disconn_ind(struct hci_conn *hcon);
-void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
-int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
 
 int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
 int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
 
 /* ----- Inquiry cache ----- */
@@ -529,6 +553,7 @@ static inline void discovery_init(struct hci_dev *hdev)
 
 static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
 {
+       hdev->discovery.result_filtering = false;
        hdev->discovery.report_invalid_rssi = true;
        hdev->discovery.rssi = HCI_RSSI_INVALID;
        hdev->discovery.uuid_count = 0;
@@ -538,6 +563,11 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
        hdev->discovery.scan_duration = 0;
 }
 
+static inline void adv_info_init(struct hci_dev *hdev)
+{
+       memset(&hdev->adv_instance, 0, sizeof(struct adv_info));
+}
+
 bool hci_discovery_active(struct hci_dev *hdev);
 
 void hci_discovery_set_state(struct hci_dev *hdev, int state);
@@ -584,7 +614,6 @@ enum {
        HCI_CONN_SC_ENABLED,
        HCI_CONN_AES_CCM,
        HCI_CONN_POWER_SAVE,
-       HCI_CONN_REMOTE_OOB,
        HCI_CONN_FLUSH_KEY,
        HCI_CONN_ENCRYPT,
        HCI_CONN_AUTH,
@@ -600,14 +629,14 @@ enum {
 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
-       return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+       return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
               test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
 }
 
 static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
-       return test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+       return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
               test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
 }
 
@@ -969,6 +998,8 @@ struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
 void hci_smp_irks_clear(struct hci_dev *hdev);
 
+bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+
 void hci_remote_oob_data_clear(struct hci_dev *hdev);
 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 bdaddr_type);
@@ -1025,10 +1056,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
 #define lmp_host_le_capable(dev)   (!!((dev)->features[1][0] & LMP_HOST_LE))
 #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
 
-#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
-                               !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
-                              test_bit(HCI_SC_ENABLED, &(dev)->dev_flags))
+#define hdev_is_powered(dev)   (test_bit(HCI_UP, &(dev)->flags) && \
+                               !hci_dev_test_flag(dev, HCI_AUTO_OFF))
+#define bredr_sc_enabled(dev)  (lmp_sc_capable(dev) && \
+                               hci_dev_test_flag(dev, HCI_SC_ENABLED))
 
 /* ----- HCI protocols ----- */
 #define HCI_PROTO_DEFER             0x01
@@ -1050,28 +1081,6 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
        }
 }
 
-static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
-{
-       switch (conn->type) {
-       case ACL_LINK:
-       case LE_LINK:
-               l2cap_connect_cfm(conn, status);
-               break;
-
-       case SCO_LINK:
-       case ESCO_LINK:
-               sco_connect_cfm(conn, status);
-               break;
-
-       default:
-               BT_ERR("unknown link type %d", conn->type);
-               break;
-       }
-
-       if (conn->connect_cfm_cb)
-               conn->connect_cfm_cb(conn, status);
-}
-
 static inline int hci_proto_disconn_ind(struct hci_conn *conn)
 {
        if (conn->type != ACL_LINK && conn->type != LE_LINK)
@@ -1080,91 +1089,69 @@ static inline int hci_proto_disconn_ind(struct hci_conn *conn)
        return l2cap_disconn_ind(conn);
 }
 
-static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
-{
-       switch (conn->type) {
-       case ACL_LINK:
-       case LE_LINK:
-               l2cap_disconn_cfm(conn, reason);
-               break;
-
-       case SCO_LINK:
-       case ESCO_LINK:
-               sco_disconn_cfm(conn, reason);
-               break;
-
-       /* L2CAP would be handled for BREDR chan */
-       case AMP_LINK:
-               break;
+/* ----- HCI callbacks ----- */
+struct hci_cb {
+       struct list_head list;
 
-       default:
-               BT_ERR("unknown link type %d", conn->type);
-               break;
-       }
+       char *name;
 
-       if (conn->disconn_cfm_cb)
-               conn->disconn_cfm_cb(conn, reason);
-}
+       void (*connect_cfm)     (struct hci_conn *conn, __u8 status);
+       void (*disconn_cfm)     (struct hci_conn *conn, __u8 status);
+       void (*security_cfm)    (struct hci_conn *conn, __u8 status,
+                                                               __u8 encrypt);
+       void (*key_change_cfm)  (struct hci_conn *conn, __u8 status);
+       void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
+};
 
-static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
+static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
 {
-       __u8 encrypt;
-
-       if (conn->type != ACL_LINK && conn->type != LE_LINK)
-               return;
-
-       if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
-               return;
+       struct hci_cb *cb;
 
-       encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
-       l2cap_security_cfm(conn, status, encrypt);
+       mutex_lock(&hci_cb_list_lock);
+       list_for_each_entry(cb, &hci_cb_list, list) {
+               if (cb->connect_cfm)
+                       cb->connect_cfm(conn, status);
+       }
+       mutex_unlock(&hci_cb_list_lock);
 
-       if (conn->security_cfm_cb)
-               conn->security_cfm_cb(conn, status);
+       if (conn->connect_cfm_cb)
+               conn->connect_cfm_cb(conn, status);
 }
 
-static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
-                                                               __u8 encrypt)
+static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
 {
-       if (conn->type != ACL_LINK && conn->type != LE_LINK)
-               return;
+       struct hci_cb *cb;
 
-       l2cap_security_cfm(conn, status, encrypt);
+       mutex_lock(&hci_cb_list_lock);
+       list_for_each_entry(cb, &hci_cb_list, list) {
+               if (cb->disconn_cfm)
+                       cb->disconn_cfm(conn, reason);
+       }
+       mutex_unlock(&hci_cb_list_lock);
 
-       if (conn->security_cfm_cb)
-               conn->security_cfm_cb(conn, status);
+       if (conn->disconn_cfm_cb)
+               conn->disconn_cfm_cb(conn, reason);
 }
 
-/* ----- HCI callbacks ----- */
-struct hci_cb {
-       struct list_head list;
-
-       char *name;
-
-       void (*security_cfm)    (struct hci_conn *conn, __u8 status,
-                                                               __u8 encrypt);
-       void (*key_change_cfm)  (struct hci_conn *conn, __u8 status);
-       void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
-};
-
 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
 {
        struct hci_cb *cb;
        __u8 encrypt;
 
-       hci_proto_auth_cfm(conn, status);
-
        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
                return;
 
        encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
 
-       read_lock(&hci_cb_list_lock);
+       mutex_lock(&hci_cb_list_lock);
        list_for_each_entry(cb, &hci_cb_list, list) {
                if (cb->security_cfm)
                        cb->security_cfm(conn, status, encrypt);
        }
-       read_unlock(&hci_cb_list_lock);
+       mutex_unlock(&hci_cb_list_lock);
+
+       if (conn->security_cfm_cb)
+               conn->security_cfm_cb(conn, status);
 }
 
 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
@@ -1178,26 +1165,27 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
        if (conn->pending_sec_level > conn->sec_level)
                conn->sec_level = conn->pending_sec_level;
 
-       hci_proto_encrypt_cfm(conn, status, encrypt);
-
-       read_lock(&hci_cb_list_lock);
+       mutex_lock(&hci_cb_list_lock);
        list_for_each_entry(cb, &hci_cb_list, list) {
                if (cb->security_cfm)
                        cb->security_cfm(conn, status, encrypt);
        }
-       read_unlock(&hci_cb_list_lock);
+       mutex_unlock(&hci_cb_list_lock);
+
+       if (conn->security_cfm_cb)
+               conn->security_cfm_cb(conn, status);
 }
 
 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
 {
        struct hci_cb *cb;
 
-       read_lock(&hci_cb_list_lock);
+       mutex_lock(&hci_cb_list_lock);
        list_for_each_entry(cb, &hci_cb_list, list) {
                if (cb->key_change_cfm)
                        cb->key_change_cfm(conn, status);
        }
-       read_unlock(&hci_cb_list_lock);
+       mutex_unlock(&hci_cb_list_lock);
 }
 
 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
@@ -1205,12 +1193,12 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
 {
        struct hci_cb *cb;
 
-       read_lock(&hci_cb_list_lock);
+       mutex_lock(&hci_cb_list_lock);
        list_for_each_entry(cb, &hci_cb_list, list) {
                if (cb->role_switch_cfm)
                        cb->role_switch_cfm(conn, status, role);
        }
-       read_unlock(&hci_cb_list_lock);
+       mutex_unlock(&hci_cb_list_lock);
 }
 
 static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
@@ -1296,8 +1284,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
 int hci_register_cb(struct hci_cb *hcb);
 int hci_unregister_cb(struct hci_cb *hcb);
 
-bool hci_req_pending(struct hci_dev *hdev);
-
 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout);
 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1312,11 +1298,35 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
 
 /* ----- HCI Sockets ----- */
 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
-void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk);
+void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
+                        int flag, struct sock *skip_sk);
 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
 
 void hci_sock_dev_event(struct hci_dev *hdev, int event);
 
+#define HCI_MGMT_VAR_LEN       BIT(0)
+#define HCI_MGMT_NO_HDEV       BIT(1)
+#define HCI_MGMT_UNTRUSTED     BIT(2)
+#define HCI_MGMT_UNCONFIGURED  BIT(3)
+
+struct hci_mgmt_handler {
+       int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
+                    u16 data_len);
+       size_t data_len;
+       unsigned long flags;
+};
+
+struct hci_mgmt_chan {
+       struct list_head list;
+       unsigned short channel;
+       size_t handler_count;
+       const struct hci_mgmt_handler *handlers;
+       void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
+};
+
+int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
+void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
+
 /* Management interface */
 #define DISCOV_TYPE_BREDR              (BIT(BDADDR_BREDR))
 #define DISCOV_TYPE_LE                 (BIT(BDADDR_LE_PUBLIC) | \
@@ -1336,7 +1346,6 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
 #define DISCOV_BREDR_INQUIRY_LEN       0x08
 #define DISCOV_LE_RESTART_DELAY                msecs_to_jiffies(200)   /* msec */
 
-int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
 int mgmt_new_settings(struct hci_dev *hdev);
 void mgmt_index_added(struct hci_dev *hdev);
 void mgmt_index_removed(struct hci_dev *hdev);
@@ -1382,9 +1391,6 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
                                    u8 status);
 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
-void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
-                                      u8 *rand192, u8 *hash256, u8 *rand256,
-                                      u8 status);
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
                       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
index e218a30f206134776ab2eed1a19a98c0d31ef395..b831242d48a4b8ce420661c9d56bd3e047f79bd6 100644 (file)
@@ -43,6 +43,8 @@
 #define MGMT_STATUS_CANCELLED          0x10
 #define MGMT_STATUS_INVALID_INDEX      0x11
 #define MGMT_STATUS_RFKILLED           0x12
+#define MGMT_STATUS_ALREADY_PAIRED     0x13
+#define MGMT_STATUS_PERMISSION_DENIED  0x14
 
 struct mgmt_hdr {
        __le16  opcode;
@@ -98,6 +100,7 @@ struct mgmt_rp_read_index_list {
 #define MGMT_SETTING_DEBUG_KEYS                0x00001000
 #define MGMT_SETTING_PRIVACY           0x00002000
 #define MGMT_SETTING_CONFIGURATION     0x00004000
+#define MGMT_SETTING_STATIC_ADDRESS    0x00008000
 
 #define MGMT_OP_READ_INFO              0x0004
 #define MGMT_READ_INFO_SIZE            0
@@ -503,6 +506,71 @@ struct mgmt_cp_start_service_discovery {
 } __packed;
 #define MGMT_START_SERVICE_DISCOVERY_SIZE 4
 
+#define MGMT_OP_READ_LOCAL_OOB_EXT_DATA        0x003B
+struct mgmt_cp_read_local_oob_ext_data {
+       __u8 type;
+} __packed;
+#define MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE 1
+struct mgmt_rp_read_local_oob_ext_data {
+       __u8    type;
+       __le16  eir_len;
+       __u8    eir[0];
+} __packed;
+
+#define MGMT_OP_READ_EXT_INDEX_LIST    0x003C
+#define MGMT_READ_EXT_INDEX_LIST_SIZE  0
+struct mgmt_rp_read_ext_index_list {
+       __le16  num_controllers;
+       struct {
+               __le16 index;
+               __u8   type;
+               __u8   bus;
+       } entry[0];
+} __packed;
+
+#define MGMT_OP_READ_ADV_FEATURES      0x0003D
+#define MGMT_READ_ADV_FEATURES_SIZE    0
+struct mgmt_rp_read_adv_features {
+       __le32 supported_flags;
+       __u8   max_adv_data_len;
+       __u8   max_scan_rsp_len;
+       __u8   max_instances;
+       __u8   num_instances;
+       __u8   instance[0];
+} __packed;
+
+#define MGMT_OP_ADD_ADVERTISING                0x003E
+struct mgmt_cp_add_advertising {
+       __u8    instance;
+       __le32  flags;
+       __le16  duration;
+       __le16  timeout;
+       __u8    adv_data_len;
+       __u8    scan_rsp_len;
+       __u8    data[0];
+} __packed;
+#define MGMT_ADD_ADVERTISING_SIZE      11
+struct mgmt_rp_add_advertising {
+       __u8    instance;
+} __packed;
+
+#define MGMT_ADV_FLAG_CONNECTABLE      BIT(0)
+#define MGMT_ADV_FLAG_DISCOV           BIT(1)
+#define MGMT_ADV_FLAG_LIMITED_DISCOV   BIT(2)
+#define MGMT_ADV_FLAG_MANAGED_FLAGS    BIT(3)
+#define MGMT_ADV_FLAG_TX_POWER         BIT(4)
+#define MGMT_ADV_FLAG_APPEARANCE       BIT(5)
+#define MGMT_ADV_FLAG_LOCAL_NAME       BIT(6)
+
+#define MGMT_OP_REMOVE_ADVERTISING     0x003F
+struct mgmt_cp_remove_advertising {
+       __u8    instance;
+} __packed;
+#define MGMT_REMOVE_ADVERTISING_SIZE   1
+struct mgmt_rp_remove_advertising {
+       __u8    instance;
+} __packed;
+
 #define MGMT_EV_CMD_COMPLETE           0x0001
 struct mgmt_ev_cmd_complete {
        __le16  opcode;
@@ -647,9 +715,14 @@ struct mgmt_ev_new_irk {
        struct mgmt_irk_info irk;
 } __packed;
 
+#define MGMT_CSRK_LOCAL_UNAUTHENTICATED                0x00
+#define MGMT_CSRK_REMOTE_UNAUTHENTICATED       0x01
+#define MGMT_CSRK_LOCAL_AUTHENTICATED          0x02
+#define MGMT_CSRK_REMOTE_AUTHENTICATED         0x03
+
 struct mgmt_csrk_info {
        struct mgmt_addr_info addr;
-       __u8 master;
+       __u8 type;
        __u8 val[16];
 } __packed;
 
@@ -685,3 +758,29 @@ struct mgmt_ev_new_conn_param {
 #define MGMT_EV_UNCONF_INDEX_REMOVED   0x001e
 
 #define MGMT_EV_NEW_CONFIG_OPTIONS     0x001f
+
+struct mgmt_ev_ext_index {
+       __u8 type;
+       __u8 bus;
+} __packed;
+
+#define MGMT_EV_EXT_INDEX_ADDED                0x0020
+
+#define MGMT_EV_EXT_INDEX_REMOVED      0x0021
+
+#define MGMT_EV_LOCAL_OOB_DATA_UPDATED 0x0022
+struct mgmt_ev_local_oob_data_updated {
+       __u8    type;
+       __le16  eir_len;
+       __u8    eir[0];
+} __packed;
+
+#define MGMT_EV_ADVERTISING_ADDED      0x0023
+struct mgmt_ev_advertising_added {
+       __u8    instance;
+} __packed;
+
+#define MGMT_EV_ADVERTISING_REMOVED    0x0024
+struct mgmt_ev_advertising_removed {
+       __u8    instance;
+} __packed;
index f04cdbb7848e564062d714b9a6a9ba845c2440ab..c2a40a172fcdde4f97dd4d66d6102f9a561eefb4 100644 (file)
@@ -82,6 +82,13 @@ typedef enum {
        AD_TRANSMIT             /* tx Machine */
 } tx_states_t;
 
+/* churn machine states(43.4.17 in the 802.3ad standard) */
+typedef enum {
+        AD_CHURN_MONITOR, /* monitoring for churn */
+        AD_CHURN,         /* churn detected (error) */
+        AD_NO_CHURN       /* no churn (no error) */
+} churn_state_t;
+
 /* rx indication types */
 typedef enum {
        AD_TYPE_LACPDU = 1,     /* type lacpdu */
@@ -229,6 +236,12 @@ typedef struct port {
        u16 sm_mux_timer_counter;       /* state machine mux timer counter */
        tx_states_t sm_tx_state;        /* state machine tx state */
        u16 sm_tx_timer_counter;        /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */
+       u16 sm_churn_actor_timer_counter;
+       u16 sm_churn_partner_timer_counter;
+       u32 churn_actor_count;
+       u32 churn_partner_count;
+       churn_state_t sm_churn_actor_state;
+       churn_state_t sm_churn_partner_state;
        struct slave *slave;            /* pointer to the bond slave that this port belongs to */
        struct aggregator *aggregator;  /* pointer to an aggregator that this port related to */
        struct port *next_port_in_aggregator;   /* Next port on the linked list of the parent aggregator */
@@ -262,6 +275,22 @@ struct ad_slave_info {
        u16 id;
 };
 
+static inline const char *bond_3ad_churn_desc(churn_state_t state)
+{
+       static const char *const churn_description[] = {
+               "monitoring",
+               "churned",
+               "none",
+               "unknown"
+       };
+       int max_size = sizeof(churn_description) / sizeof(churn_description[0]);
+
+       if (state >= max_size)
+               state = max_size - 1;
+
+       return churn_description[state];
+}
+
 /* ========== AD Exported functions to the main bonding code ========== */
 void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
 void bond_3ad_bind_slave(struct slave *slave);
index 64e09e1e809960be7daaef505e2e39dbab01c238..44130643656933783d77e356878651d7d10db4b5 100644 (file)
@@ -214,6 +214,39 @@ enum ieee80211_rate_flags {
        IEEE80211_RATE_SUPPORTS_10MHZ   = 1<<6,
 };
 
+/**
+ * enum ieee80211_bss_type - BSS type filter
+ *
+ * @IEEE80211_BSS_TYPE_ESS: Infrastructure BSS
+ * @IEEE80211_BSS_TYPE_PBSS: Personal BSS
+ * @IEEE80211_BSS_TYPE_IBSS: Independent BSS
+ * @IEEE80211_BSS_TYPE_MBSS: Mesh BSS
+ * @IEEE80211_BSS_TYPE_ANY: Wildcard value for matching any BSS type
+ */
+enum ieee80211_bss_type {
+       IEEE80211_BSS_TYPE_ESS,
+       IEEE80211_BSS_TYPE_PBSS,
+       IEEE80211_BSS_TYPE_IBSS,
+       IEEE80211_BSS_TYPE_MBSS,
+       IEEE80211_BSS_TYPE_ANY
+};
+
+/**
+ * enum ieee80211_privacy - BSS privacy filter
+ *
+ * @IEEE80211_PRIVACY_ON: privacy bit set
+ * @IEEE80211_PRIVACY_OFF: privacy bit clear
+ * @IEEE80211_PRIVACY_ANY: Wildcard value for matching any privacy setting
+ */
+enum ieee80211_privacy {
+       IEEE80211_PRIVACY_ON,
+       IEEE80211_PRIVACY_OFF,
+       IEEE80211_PRIVACY_ANY
+};
+
+#define IEEE80211_PRIVACY(x)   \
+       ((x) ? IEEE80211_PRIVACY_ON : IEEE80211_PRIVACY_OFF)
+
 /**
  * struct ieee80211_rate - bitrate definition
  *
@@ -2423,6 +2456,7 @@ struct cfg80211_ops {
 
        struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy,
                                                  const char *name,
+                                                 unsigned char name_assign_type,
                                                  enum nl80211_iftype type,
                                                  u32 *flags,
                                                  struct vif_params *params);
@@ -3183,10 +3217,8 @@ struct wiphy {
        const struct ieee80211_ht_cap *ht_capa_mod_mask;
        const struct ieee80211_vht_cap *vht_capa_mod_mask;
 
-#ifdef CONFIG_NET_NS
        /* the network namespace this phy lives in currently */
-       struct net *_net;
-#endif
+       possible_net_t _net;
 
 #ifdef CONFIG_CFG80211_WEXT
        const struct iw_handler_def *wext;
@@ -4012,14 +4044,16 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
                                      struct ieee80211_channel *channel,
                                      const u8 *bssid,
                                      const u8 *ssid, size_t ssid_len,
-                                     u16 capa_mask, u16 capa_val);
+                                     enum ieee80211_bss_type bss_type,
+                                     enum ieee80211_privacy);
 static inline struct cfg80211_bss *
 cfg80211_get_ibss(struct wiphy *wiphy,
                  struct ieee80211_channel *channel,
                  const u8 *ssid, size_t ssid_len)
 {
        return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len,
-                               WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+                               IEEE80211_BSS_TYPE_IBSS,
+                               IEEE80211_PRIVACY_ANY);
 }
 
 /**
@@ -4260,6 +4294,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
                                           int approxlen);
 
 struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+                                          struct wireless_dev *wdev,
                                           enum nl80211_commands cmd,
                                           enum nl80211_attrs attr,
                                           int vendor_event_idx,
@@ -4314,6 +4349,7 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
 /**
  * cfg80211_vendor_event_alloc - allocate vendor-specific event skb
  * @wiphy: the wiphy
+ * @wdev: the wireless device
  * @event_idx: index of the vendor event in the wiphy's vendor_events
  * @approxlen: an upper bound of the length of the data that will
  *     be put into the skb
@@ -4322,16 +4358,20 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
  * This function allocates and pre-fills an skb for an event on the
  * vendor-specific multicast group.
  *
+ * If wdev != NULL, both the ifindex and identifier of the specified
+ * wireless device are added to the event message before the vendor data
+ * attribute.
+ *
  * When done filling the skb, call cfg80211_vendor_event() with the
  * skb to send the event.
  *
  * Return: An allocated and pre-filled skb. %NULL if any errors happen.
  */
 static inline struct sk_buff *
-cfg80211_vendor_event_alloc(struct wiphy *wiphy, int approxlen,
-                           int event_idx, gfp_t gfp)
+cfg80211_vendor_event_alloc(struct wiphy *wiphy, struct wireless_dev *wdev,
+                            int approxlen, int event_idx, gfp_t gfp)
 {
-       return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_VENDOR,
+       return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR,
                                          NL80211_ATTR_VENDOR_DATA,
                                          event_idx, approxlen, gfp);
 }
@@ -4432,7 +4472,7 @@ static inline int cfg80211_testmode_reply(struct sk_buff *skb)
 static inline struct sk_buff *
 cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp)
 {
-       return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_TESTMODE,
+       return __cfg80211_alloc_event_skb(wiphy, NULL, NL80211_CMD_TESTMODE,
                                          NL80211_ATTR_TESTDATA, -1,
                                          approxlen, gfp);
 }
@@ -4862,6 +4902,17 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
 bool ieee80211_operating_class_to_band(u8 operating_class,
                                       enum ieee80211_band *band);
 
+/**
+ * ieee80211_chandef_to_operating_class - convert chandef to operation class
+ *
+ * @chandef: the chandef to convert
+ * @op_class: a pointer to the resulting operating class
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
+                                         u8 *op_class);
+
 /*
  * cfg80211_tdls_oper_request - request userspace to perform TDLS operation
  * @dev: the device on which the operation is requested
index 42a9c8431177c295276068e18967c1bce12e648e..48103cf94e976e9c13809cfec895e9bb1c9fa96c 100644 (file)
@@ -40,7 +40,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 #define compat_mmsghdr mmsghdr
 #endif /* defined(CONFIG_COMPAT) */
 
-ssize_t get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
+int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
                      struct sockaddr __user **, struct iovec **);
 asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
                                   unsigned int);
index 597b88a94332150c2da3f163daccbc012ce1b9cd..207d9ba1f92c8ae1789cff6fdc99ad15f256488b 100644 (file)
@@ -49,6 +49,9 @@ struct dcbnl_rtnl_ops {
        int (*ieee_setets) (struct net_device *, struct ieee_ets *);
        int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
        int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
+       int (*ieee_getqcn) (struct net_device *, struct ieee_qcn *);
+       int (*ieee_setqcn) (struct net_device *, struct ieee_qcn *);
+       int (*ieee_getqcnstats) (struct net_device *, struct ieee_qcn_stats *);
        int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
        int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
        int (*ieee_getapp) (struct net_device *, struct dcb_app *);
index fac4e3f4a6d3c0ede92af3d4df4c24319a94c0a0..0f26aa707e62a13225031797c9c2edadbff05a3a 100644 (file)
@@ -22,6 +22,7 @@ int dn_neigh_router_hello(struct sk_buff *skb);
 int dn_neigh_endnode_hello(struct sk_buff *skb);
 void dn_neigh_pointopoint_hello(struct sk_buff *skb);
 int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
+int dn_to_neigh_output(struct sk_buff *skb);
 
 extern struct neigh_table dn_neigh_table;
 
index ed3c34bbb67ab89f1570acf3daf3d93aaf06a34c..fbca63ba8f733fd37fa300bbbe33a300ab05fd49 100644 (file)
@@ -72,6 +72,7 @@ struct dsa_platform_data {
         * to the root switch chip of the tree.
         */
        struct device   *netdev;
+       struct net_device *of_netdev;
 
        /*
         * Info structs describing each of the switch chips
@@ -127,6 +128,11 @@ struct dsa_switch {
        struct dsa_switch_tree  *dst;
        int                     index;
 
+       /*
+        * Tagging protocol understood by this switch
+        */
+       enum dsa_tag_protocol   tag_protocol;
+
        /*
         * Configuration data for this switch.
         */
@@ -165,6 +171,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
        return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
 }
 
+static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
+{
+       return ds->phys_port_mask & (1 << p) && ds->ports[p];
+}
+
 static inline u8 dsa_upstream_port(struct dsa_switch *ds)
 {
        struct dsa_switch_tree *dst = ds->dst;
@@ -275,6 +286,22 @@ struct dsa_switch_driver {
        int     (*get_regs_len)(struct dsa_switch *ds, int port);
        void    (*get_regs)(struct dsa_switch *ds, int port,
                            struct ethtool_regs *regs, void *p);
+
+       /*
+        * Bridge integration
+        */
+       int     (*port_join_bridge)(struct dsa_switch *ds, int port,
+                                   u32 br_port_mask);
+       int     (*port_leave_bridge)(struct dsa_switch *ds, int port,
+                                    u32 br_port_mask);
+       int     (*port_stp_update)(struct dsa_switch *ds, int port,
+                                  u8 state);
+       int     (*fdb_add)(struct dsa_switch *ds, int port,
+                          const unsigned char *addr, u16 vid);
+       int     (*fdb_del)(struct dsa_switch *ds, int port,
+                          const unsigned char *addr, u16 vid);
+       int     (*fdb_getnext)(struct dsa_switch *ds, int port,
+                              unsigned char *addr, bool *is_static);
 };
 
 void register_switch_driver(struct dsa_switch_driver *type);
index 1f99a1de0e4ff419b5a9f137298e0c0b3182f7a8..d64253914a6ab6406d92becb4c44fbdec8af17e2 100644 (file)
@@ -12,7 +12,6 @@ struct sock;
 
 struct dst_ops {
        unsigned short          family;
-       __be16                  protocol;
        unsigned int            gc_thresh;
 
        int                     (*gc)(struct dst_ops *ops);
index e584de16e4c3629ccebef06d000b47166e5cc65d..6d67383a5114bd1010908bc7c8328a41d52e0a3b 100644 (file)
@@ -58,7 +58,7 @@ struct fib_rules_ops {
                                             struct sk_buff *,
                                             struct fib_rule_hdr *,
                                             struct nlattr **);
-       void                    (*delete)(struct fib_rule *);
+       int                     (*delete)(struct fib_rule *);
        int                     (*compare)(struct fib_rule *,
                                           struct fib_rule_hdr *,
                                           struct nlattr **);
@@ -95,17 +95,10 @@ static inline void fib_rule_get(struct fib_rule *rule)
        atomic_inc(&rule->refcnt);
 }
 
-static inline void fib_rule_put_rcu(struct rcu_head *head)
-{
-       struct fib_rule *rule = container_of(head, struct fib_rule, rcu);
-       release_net(rule->fr_net);
-       kfree(rule);
-}
-
 static inline void fib_rule_put(struct fib_rule *rule)
 {
        if (atomic_dec_and_test(&rule->refcnt))
-               call_rcu(&rule->rcu, fib_rule_put_rcu);
+               kfree_rcu(rule, rcu);
 }
 
 static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
index 0574abd3db86bc0a796819a387dfc751adfcb04d..a9af1cc8c1bc6089d0facdf56001274cf7993a5a 100644 (file)
@@ -92,9 +92,7 @@ struct genl_info {
        struct genlmsghdr *     genlhdr;
        void *                  userhdr;
        struct nlattr **        attrs;
-#ifdef CONFIG_NET_NS
-       struct net *            _net;
-#endif
+       possible_net_t          _net;
        void *                  user_ptr[2];
        struct sock *           dst_sk;
 };
index 98e5f9578f862abb894190ca729e0f4613e1a8fa..1c8b6820b69476f7247baac12183a6f5f5fe2275 100644 (file)
@@ -41,18 +41,18 @@ enum {
 struct inet6_ifaddr {
        struct in6_addr         addr;
        __u32                   prefix_len;
-       
+
        /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
        __u32                   valid_lft;
        __u32                   prefered_lft;
        atomic_t                refcnt;
        spinlock_t              lock;
-       spinlock_t              state_lock;
 
        int                     state;
 
        __u32                   flags;
        __u8                    dad_probes;
+       __u8                    stable_privacy_retry;
 
        __u16                   scope;
 
index 74af137304bea4e8aec720d69f7f6ca2c64b0b94..6d539e4e5ba731acb6ee949c0cc636fce93f435a 100644 (file)
@@ -28,8 +28,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
                                      const struct request_sock *req);
 
-struct request_sock *inet6_csk_search_req(const struct sock *sk,
-                                         struct request_sock ***prevp,
+struct request_sock *inet6_csk_search_req(struct sock *sk,
                                          const __be16 rport,
                                          const struct in6_addr *raddr,
                                          const struct in6_addr *laddr,
index 9201afe083faf4f5b12e0f59376f27521050e628..7ff588ca6817afaae545382ae39a8126d9caf946 100644 (file)
@@ -38,8 +38,6 @@ static inline unsigned int __inet6_ehashfn(const u32 lhash,
        return jhash_3words(lhash, fhash, ports, initval);
 }
 
-int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
-
 /*
  * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
  * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
index b2828a06a5a63355f1aa2be27e74a472cc99fc62..4a92423eefa509b27d8caf75474406fe03868b04 100644 (file)
@@ -21,12 +21,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
 int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
                       int addr_len, int flags);
 int inet_accept(struct socket *sock, struct socket *newsock, int flags);
-int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-                size_t size);
+int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
 ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
                      size_t size, int flags);
-int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-                size_t size, int flags);
+int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                int flags);
 int inet_shutdown(struct socket *sock, int how);
 int inet_listen(struct socket *sock, int backlog);
 void inet_sock_destruct(struct sock *sk);
index 5976bdecf58b05b26980c76ae140d7c016ca939b..7b5887cd11723441418daa5ec306d8c8b4d7c1f1 100644 (file)
@@ -126,6 +126,8 @@ struct inet_connection_sock {
 
                /* Information on the current probe. */
                int               probe_size;
+
+               u32               probe_timestamp;
        } icsk_mtup;
        u32                       icsk_ca_priv[16];
        u32                       icsk_user_timeout;
@@ -254,8 +256,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
 
 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 
-struct request_sock *inet_csk_search_req(const struct sock *sk,
-                                        struct request_sock ***prevp,
+struct request_sock *inet_csk_search_req(struct sock *sk,
                                         const __be16 rport,
                                         const __be32 raddr,
                                         const __be32 laddr);
@@ -281,15 +282,13 @@ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
                                                struct request_sock *req)
 {
-       if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
-               inet_csk_delete_keepalive_timer(sk);
+       reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 }
 
 static inline void inet_csk_reqsk_queue_added(struct sock *sk,
                                              const unsigned long timeout)
 {
-       if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
-               inet_csk_reset_keepalive_timer(sk, timeout);
+       reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
 }
 
 static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
@@ -308,26 +307,19 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 }
 
 static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
-                                              struct request_sock *req,
-                                              struct request_sock **prev)
+                                              struct request_sock *req)
 {
-       reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
+       reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req);
 }
 
 static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
-                                            struct request_sock *req,
-                                            struct request_sock **prev)
+                                            struct request_sock *req)
 {
-       inet_csk_reqsk_queue_unlink(sk, req, prev);
+       inet_csk_reqsk_queue_unlink(sk, req);
        inet_csk_reqsk_queue_removed(sk, req);
-       reqsk_free(req);
+       reqsk_put(req);
 }
 
-void inet_csk_reqsk_queue_prune(struct sock *parent,
-                               const unsigned long interval,
-                               const unsigned long timeout,
-                               const unsigned long max_rto);
-
 void inet_csk_destroy_sock(struct sock *sk);
 void inet_csk_prepare_forced_close(struct sock *sk);
 
index dd1950a7e2730e0024f1c82fac8ab20f9b533fe5..73fe0f9525d92068aa43fa995a4d1547cc20eb34 100644 (file)
@@ -76,9 +76,7 @@ struct inet_ehash_bucket {
  * ports are created in O(1) time?  I thought so. ;-)  -DaveM
  */
 struct inet_bind_bucket {
-#ifdef CONFIG_NET_NS
-       struct net              *ib_net;
-#endif
+       possible_net_t          ib_net;
        unsigned short          port;
        signed char             fastreuse;
        signed char             fastreuseport;
@@ -223,8 +221,8 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
 void inet_bind_bucket_destroy(struct kmem_cache *cachep,
                              struct inet_bind_bucket *tb);
 
-static inline int inet_bhashfn(struct net *net, const __u16 lport,
-                              const int bhash_size)
+static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
+                              const u32 bhash_size)
 {
        return (lport + net_hash_mix(net)) & (bhash_size - 1);
 }
@@ -233,7 +231,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                    const unsigned short snum);
 
 /* These can have wildcards, don't try too hard. */
-static inline int inet_lhashfn(struct net *net, const unsigned short num)
+static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
 {
        return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
 }
@@ -251,6 +249,7 @@ void inet_put_port(struct sock *sk);
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
 int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
 void inet_hash(struct sock *sk);
 void inet_unhash(struct sock *sk);
 
@@ -385,13 +384,32 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
                                     iph->daddr, dport, inet_iif(skb));
 }
 
+u32 sk_ehashfn(const struct sock *sk);
+u32 inet6_ehashfn(const struct net *net,
+                 const struct in6_addr *laddr, const u16 lport,
+                 const struct in6_addr *faddr, const __be16 fport);
+
+static inline void sk_daddr_set(struct sock *sk, __be32 addr)
+{
+       sk->sk_daddr = addr; /* alias of inet_daddr */
+#if IS_ENABLED(CONFIG_IPV6)
+       ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
+#endif
+}
+
+static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
+{
+       sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
+#if IS_ENABLED(CONFIG_IPV6)
+       ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
+#endif
+}
+
 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                        struct sock *sk, u32 port_offset,
                        int (*check_established)(struct inet_timewait_death_row *,
                                                 struct sock *, __u16,
-                                                struct inet_timewait_sock **),
-                       int (*hash)(struct sock *sk,
-                                   struct inet_timewait_sock *twp));
+                                                struct inet_timewait_sock **));
 
 int inet_hash_connect(struct inet_timewait_death_row *death_row,
                      struct sock *sk);
index eb16c7beed1e9570168d1ccbc5a7cd9cf31dd52d..b6c3737da4e94404585a97f59ad7a4e2e1f6e105 100644 (file)
@@ -27,6 +27,7 @@
 #include <net/sock.h>
 #include <net/request_sock.h>
 #include <net/netns/hash.h>
+#include <net/tcp_states.h>
 
 /** struct ip_options - IP Options
  *
@@ -77,6 +78,10 @@ struct inet_request_sock {
 #define ir_v6_rmt_addr         req.__req_common.skc_v6_daddr
 #define ir_v6_loc_addr         req.__req_common.skc_v6_rcv_saddr
 #define ir_iif                 req.__req_common.skc_bound_dev_if
+#define ir_cookie              req.__req_common.skc_cookie
+#define ireq_net               req.__req_common.skc_net
+#define ireq_state             req.__req_common.skc_state
+#define ireq_family            req.__req_common.skc_family
 
        kmemcheck_bitfield_begin(flags);
        u16                     snd_wscale : 4,
@@ -88,11 +93,11 @@ struct inet_request_sock {
                                acked      : 1,
                                no_srccheck: 1;
        kmemcheck_bitfield_end(flags);
+       u32                     ir_mark;
        union {
                struct ip_options_rcu   *opt;
                struct sk_buff          *pktopts;
        };
-       u32                     ir_mark;
 };
 
 static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
@@ -100,13 +105,12 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
        return (struct inet_request_sock *)sk;
 }
 
-static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
+static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
 {
-       if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) {
+       if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
                return skb->mark;
-       } else {
-               return sk->sk_mark;
-       }
+
+       return sk->sk_mark;
 }
 
 struct inet_cork {
@@ -239,18 +243,8 @@ static inline unsigned int __inet_ehashfn(const __be32 laddr,
                            initval);
 }
 
-static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
-{
-       struct request_sock *req = reqsk_alloc(ops);
-       struct inet_request_sock *ireq = inet_rsk(req);
-
-       if (req != NULL) {
-               kmemcheck_annotate_bitfield(ireq, flags);
-               ireq->opt = NULL;
-       }
-
-       return req;
-}
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+                                     struct sock *sk_listener);
 
 static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
 {
index 6c566034e26d9bed72bfbcce39a308b9e315d2bb..b7ce1003c429d43b0a69e45408eddd5fb35129e4 100644 (file)
@@ -122,6 +122,7 @@ struct inet_timewait_sock {
 #define tw_v6_rcv_saddr        __tw_common.skc_v6_rcv_saddr
 #define tw_dport               __tw_common.skc_dport
 #define tw_num                 __tw_common.skc_num
+#define tw_cookie              __tw_common.skc_cookie
 
        int                     tw_timeout;
        volatile unsigned char  tw_substate;
index 80479abddf73cc181899e1115a572727f578633d..d5332ddcea3f3b58fb7f38254d6c8a7e84de21eb 100644 (file)
@@ -19,6 +19,7 @@ struct inetpeer_addr_base {
        union {
                __be32                  a4;
                __be32                  a6[4];
+               struct in6_addr         in6;
        };
 };
 
@@ -151,7 +152,7 @@ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
 {
        struct inetpeer_addr daddr;
 
-       *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
+       daddr.addr.in6 = *v6daddr;
        daddr.family = AF_INET6;
        return inet_getpeer(base, &daddr, create);
 }
index 025c61c0dffbfe9ddfcc2cd6f9eb38b2af504d3f..69cd9cb8400c981b4a1aa9530e744e8db1bdd015 100644 (file)
@@ -318,9 +318,10 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
 }
 
 u32 ip_idents_reserve(u32 hash, int segs);
-void __ip_select_ident(struct iphdr *iph, int segs);
+void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
 
-static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
+static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
+                                       struct sock *sk, int segs)
 {
        struct iphdr *iph = ip_hdr(skb);
 
@@ -337,13 +338,14 @@ static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, in
                        iph->id = 0;
                }
        } else {
-               __ip_select_ident(iph, segs);
+               __ip_select_ident(net, iph, segs);
        }
 }
 
-static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
+static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
+                                  struct sock *sk)
 {
-       ip_select_ident_segs(skb, sk, 1);
+       ip_select_ident_segs(net, skb, sk, 1);
 }
 
 static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
@@ -453,22 +455,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
 
 #endif
 
-static inline int sk_mc_loop(struct sock *sk)
-{
-       if (!sk)
-               return 1;
-       switch (sk->sk_family) {
-       case AF_INET:
-               return inet_sk(sk)->mc_loop;
-#if IS_ENABLED(CONFIG_IPV6)
-       case AF_INET6:
-               return inet6_sk(sk)->mc_loop;
-#endif
-       }
-       WARN_ON(1);
-       return 1;
-}
-
 bool ip_call_ra_chain(struct sk_buff *skb);
 
 /*
index 1d09b46c1e489325b95f9987327d95ca8affed08..eda131d179d971147f5b7ac254876dbf07946861 100644 (file)
@@ -174,7 +174,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
 
 static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
 {
-       struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
+       struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+                               inet6_sk(skb->sk) : NULL;
 
        return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
               skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
index 76c091b53daef0c44105a99d843dba281fd08d31..1668be5937e62c9a8f150d75dc0f27f441090222 100644 (file)
@@ -71,6 +71,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
 __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
                             const struct in6_addr *raddr);
 struct net *ip6_tnl_get_link_net(const struct net_device *dev);
+int ip6_tnl_get_iflink(const struct net_device *dev);
 
 static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
index 5bd120e4bc0ad586b4118d307343b52105b18324..54271ed0ed45b5b6ee5b8ffadd6aae01e0872bb8 100644 (file)
@@ -136,7 +136,7 @@ struct fib_result {
        u32             tclassid;
        struct fib_info *fi;
        struct fib_table *table;
-       struct list_head *fa_head;
+       struct hlist_head *fa_head;
 };
 
 struct fib_result_nl {
@@ -185,7 +185,9 @@ struct fib_table {
        u32                     tb_id;
        int                     tb_default;
        int                     tb_num_default;
-       unsigned long           tb_data[0];
+       struct rcu_head         rcu;
+       unsigned long           *tb_data;
+       unsigned long           __data[0];
 };
 
 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
@@ -195,10 +197,10 @@ int fib_table_delete(struct fib_table *, struct fib_config *);
 int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
                   struct netlink_callback *cb);
 int fib_table_flush(struct fib_table *table);
+struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
+void fib_table_flush_external(struct fib_table *table);
 void fib_free_table(struct fib_table *tb);
 
-
-
 #ifndef CONFIG_IP_MULTIPLE_TABLES
 
 #define TABLE_LOCAL_INDEX      (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1))
@@ -206,12 +208,16 @@ void fib_free_table(struct fib_table *tb);
 
 static inline struct fib_table *fib_get_table(struct net *net, u32 id)
 {
+       struct hlist_node *tb_hlist;
        struct hlist_head *ptr;
 
        ptr = id == RT_TABLE_LOCAL ?
                &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] :
                &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX];
-       return hlist_entry(ptr->first, struct fib_table, tb_hlist);
+
+       tb_hlist = rcu_dereference_rtnl(hlist_first_rcu(ptr));
+
+       return hlist_entry(tb_hlist, struct fib_table, tb_hlist);
 }
 
 static inline struct fib_table *fib_new_table(struct net *net, u32 id)
@@ -222,14 +228,13 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id)
 static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
                             struct fib_result *res)
 {
+       struct fib_table *tb;
        int err = -ENETUNREACH;
 
        rcu_read_lock();
 
-       if (!fib_table_lookup(fib_get_table(net, RT_TABLE_LOCAL), flp, res,
-                             FIB_LOOKUP_NOREF) ||
-           !fib_table_lookup(fib_get_table(net, RT_TABLE_MAIN), flp, res,
-                             FIB_LOOKUP_NOREF))
+       tb = fib_get_table(net, RT_TABLE_MAIN);
+       if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
                err = 0;
 
        rcu_read_unlock();
@@ -249,28 +254,29 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
 static inline int fib_lookup(struct net *net, struct flowi4 *flp,
                             struct fib_result *res)
 {
-       if (!net->ipv4.fib_has_custom_rules) {
-               int err = -ENETUNREACH;
-
-               rcu_read_lock();
-
-               res->tclassid = 0;
-               if ((net->ipv4.fib_local &&
-                    !fib_table_lookup(net->ipv4.fib_local, flp, res,
-                                      FIB_LOOKUP_NOREF)) ||
-                   (net->ipv4.fib_main &&
-                    !fib_table_lookup(net->ipv4.fib_main, flp, res,
-                                      FIB_LOOKUP_NOREF)) ||
-                   (net->ipv4.fib_default &&
-                    !fib_table_lookup(net->ipv4.fib_default, flp, res,
-                                      FIB_LOOKUP_NOREF)))
-                       err = 0;
-
-               rcu_read_unlock();
-
-               return err;
+       struct fib_table *tb;
+       int err;
+
+       if (net->ipv4.fib_has_custom_rules)
+               return __fib_lookup(net, flp, res);
+
+       rcu_read_lock();
+
+       res->tclassid = 0;
+
+       for (err = 0; !err; err = -ENETUNREACH) {
+               tb = rcu_dereference_rtnl(net->ipv4.fib_main);
+               if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+                       break;
+
+               tb = rcu_dereference_rtnl(net->ipv4.fib_default);
+               if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+                       break;
        }
-       return __fib_lookup(net, flp, res);
+
+       rcu_read_unlock();
+
+       return err;
 }
 
 #endif /* CONFIG_IP_MULTIPLE_TABLES */
@@ -294,6 +300,8 @@ static inline int fib_num_tclassid_users(struct net *net)
        return 0;
 }
 #endif
+int fib_unmerge(struct net *net);
+void fib_flush_external(struct net *net);
 
 /* Exported by fib_semantics.c */
 int ip_fib_check_default(__be32 gw, struct net_device *dev);
@@ -304,7 +312,7 @@ void fib_select_multipath(struct fib_result *res);
 
 /* Exported by fib_trie.c */
 void fib_trie_init(void);
-struct fib_table *fib_trie_table(u32 id);
+struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
 
 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 {
index 2c47061a6954543abe462ece352e93cdd0538033..d8214cb88bbcfa6524a7d1900c543a45a05f7f31 100644 (file)
@@ -142,6 +142,7 @@ int ip_tunnel_init(struct net_device *dev);
 void ip_tunnel_uninit(struct net_device *dev);
 void  ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
 struct net *ip_tunnel_get_link_net(const struct net_device *dev);
+int ip_tunnel_get_iflink(const struct net_device *dev);
 int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                       struct rtnl_link_ops *ops, char *devname);
 
index 615b20b585452111a25085890d8fa875657dbe76..4e3731ee4eac802e24e5d7259d0e4db36566d2f6 100644 (file)
@@ -47,13 +47,13 @@ static inline struct net *skb_net(const struct sk_buff *skb)
         * Start with the most likely hit
         * End with BUG
         */
-       if (likely(skb->dev && skb->dev->nd_net))
+       if (likely(skb->dev && dev_net(skb->dev)))
                return dev_net(skb->dev);
        if (skb_dst(skb) && skb_dst(skb)->dev)
                return dev_net(skb_dst(skb)->dev);
        WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
                      __func__, __LINE__);
-       if (likely(skb->sk && skb->sk->sk_net))
+       if (likely(skb->sk && sock_net(skb->sk)))
                return sock_net(skb->sk);
        pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
                __func__, __LINE__);
@@ -71,11 +71,11 @@ static inline struct net *skb_sknet(const struct sk_buff *skb)
 #ifdef CONFIG_NET_NS
 #ifdef CONFIG_IP_VS_DEBUG
        /* Start with the most likely hit */
-       if (likely(skb->sk && skb->sk->sk_net))
+       if (likely(skb->sk && sock_net(skb->sk)))
                return sock_net(skb->sk);
        WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
                       __func__, __LINE__);
-       if (likely(skb->dev && skb->dev->nd_net))
+       if (likely(skb->dev && dev_net(skb->dev)))
                return dev_net(skb->dev);
        pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
                __func__, __LINE__);
@@ -365,15 +365,15 @@ struct ip_vs_seq {
 
 /* counters per cpu */
 struct ip_vs_counters {
-       __u32           conns;          /* connections scheduled */
-       __u32           inpkts;         /* incoming packets */
-       __u32           outpkts;        /* outgoing packets */
+       __u64           conns;          /* connections scheduled */
+       __u64           inpkts;         /* incoming packets */
+       __u64           outpkts;        /* outgoing packets */
        __u64           inbytes;        /* incoming bytes */
        __u64           outbytes;       /* outgoing bytes */
 };
 /* Stats per cpu */
 struct ip_vs_cpu_stats {
-       struct ip_vs_counters   ustats;
+       struct ip_vs_counters   cnt;
        struct u64_stats_sync   syncp;
 };
 
@@ -383,23 +383,40 @@ struct ip_vs_estimator {
 
        u64                     last_inbytes;
        u64                     last_outbytes;
-       u32                     last_conns;
-       u32                     last_inpkts;
-       u32                     last_outpkts;
-
-       u32                     cps;
-       u32                     inpps;
-       u32                     outpps;
-       u32                     inbps;
-       u32                     outbps;
+       u64                     last_conns;
+       u64                     last_inpkts;
+       u64                     last_outpkts;
+
+       u64                     cps;
+       u64                     inpps;
+       u64                     outpps;
+       u64                     inbps;
+       u64                     outbps;
+};
+
+/*
+ * IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user
+ */
+struct ip_vs_kstats {
+       u64                     conns;          /* connections scheduled */
+       u64                     inpkts;         /* incoming packets */
+       u64                     outpkts;        /* outgoing packets */
+       u64                     inbytes;        /* incoming bytes */
+       u64                     outbytes;       /* outgoing bytes */
+
+       u64                     cps;            /* current connection rate */
+       u64                     inpps;          /* current in packet rate */
+       u64                     outpps;         /* current out packet rate */
+       u64                     inbps;          /* current in byte rate */
+       u64                     outbps;         /* current out byte rate */
 };
 
 struct ip_vs_stats {
-       struct ip_vs_stats_user ustats;         /* statistics */
+       struct ip_vs_kstats     kstats;         /* kernel statistics */
        struct ip_vs_estimator  est;            /* estimator */
        struct ip_vs_cpu_stats __percpu *cpustats;      /* per cpu counters */
        spinlock_t              lock;           /* spin lock */
-       struct ip_vs_stats_user ustats0;        /* reset values */
+       struct ip_vs_kstats     kstats0;        /* reset values */
 };
 
 struct dst_entry;
@@ -924,6 +941,7 @@ struct netns_ipvs {
        int                     sysctl_nat_icmp_send;
        int                     sysctl_pmtu_disc;
        int                     sysctl_backup_only;
+       int                     sysctl_conn_reuse_mode;
 
        /* ip_vs_lblc */
        int                     sysctl_lblc_expiration;
@@ -1042,6 +1060,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
               ipvs->sysctl_backup_only;
 }
 
+static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
+{
+       return ipvs->sysctl_conn_reuse_mode;
+}
+
 #else
 
 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1109,6 +1132,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
        return 0;
 }
 
+static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
+{
+       return 1;
+}
+
 #endif
 
 /* IPVS core functions
@@ -1388,8 +1416,7 @@ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
 void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
 void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
 void ip_vs_zero_estimator(struct ip_vs_stats *stats);
-void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
-                         struct ip_vs_stats *stats);
+void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);
 
 /* Various IPVS packet transmitters (from ip_vs_xmit.c) */
 int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
index 4c9fe224d73bff7b61b27be16460af51cc73f515..65142e6af44052b2807914c96cf48cf5882072df 100644 (file)
@@ -671,8 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
-void ipv6_proxy_select_ident(struct sk_buff *skb);
+void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr,
+                      struct rt6_info *rt);
+void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
 
 int ip6_dst_hoplimit(struct dst_entry *dst);
 
@@ -940,4 +941,8 @@ int ipv6_sysctl_register(void);
 void ipv6_sysctl_unregister(void);
 #endif
 
+int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+                     const struct in6_addr *addr);
+int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+                     const struct in6_addr *addr);
 #endif /* _NET_IPV6_H */
index a830b01baba4b08a1d5054557c540312084b8f50..8f81bbbc38fc939070a5761e3af90da62faf8d68 100644 (file)
@@ -519,6 +519,17 @@ iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends,
        return stream;
 }
 
+static inline char *
+iwe_stream_add_event_check(struct iw_request_info *info, char *stream,
+                          char *ends, struct iw_event *iwe, int event_len)
+{
+       char *res = iwe_stream_add_event(info, stream, ends, iwe, event_len);
+
+       if (res == stream)
+               return ERR_PTR(-E2BIG);
+       return res;
+}
+
 /*------------------------------------------------------------------*/
 /*
  * Wrapper to add an short Wireless Event containing a pointer to a
@@ -545,6 +556,17 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
        return stream;
 }
 
+static inline char *
+iwe_stream_add_point_check(struct iw_request_info *info, char *stream,
+                          char *ends, struct iw_event *iwe, char *extra)
+{
+       char *res = iwe_stream_add_point(info, stream, ends, iwe, extra);
+
+       if (res == stream)
+               return ERR_PTR(-E2BIG);
+       return res;
+}
+
 /*------------------------------------------------------------------*/
 /*
  * Wrapper to add a value to a Wireless Event in a stream of events.
index d52914b75331a7bf2a384e10d9ec4a4ce427260a..201bc68e0cffd19766c31e983c6e652d50c681f9 100644 (file)
@@ -301,16 +301,85 @@ enum ieee80211_bss_change {
 #define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4
 
 /**
- * enum ieee80211_rssi_event - RSSI threshold event
- * An indicator for when RSSI goes below/above a certain threshold.
- * @RSSI_EVENT_HIGH: AP's rssi crossed the high threshold set by the driver.
- * @RSSI_EVENT_LOW: AP's rssi crossed the low threshold set by the driver.
+ * enum ieee80211_event_type - event to be notified to the low level driver
+ * @RSSI_EVENT: AP's rssi crossed a threshold set by the driver.
+ * @MLME_EVENT: event related to MLME
  */
-enum ieee80211_rssi_event {
+enum ieee80211_event_type {
+       RSSI_EVENT,
+       MLME_EVENT,
+};
+
+/**
+ * enum ieee80211_rssi_event_data - relevant when event type is %RSSI_EVENT
+ * @RSSI_EVENT_HIGH: AP's rssi went above the threshold set by the driver.
+ * @RSSI_EVENT_LOW: AP's rssi went below the threshold set by the driver.
+ */
+enum ieee80211_rssi_event_data {
        RSSI_EVENT_HIGH,
        RSSI_EVENT_LOW,
 };
 
+/**
+ * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT
+ * @data: See &enum ieee80211_rssi_event_data
+ */
+struct ieee80211_rssi_event {
+       enum ieee80211_rssi_event_data data;
+};
+
+/**
+ * enum ieee80211_mlme_event_data - relevant when event type is %MLME_EVENT
+ * @AUTH_EVENT: the MLME operation is authentication
+ * @ASSOC_EVENT: the MLME operation is association
+ * @DEAUTH_RX_EVENT: deauth received.
+ * @DEAUTH_TX_EVENT: deauth sent.
+ */
+enum ieee80211_mlme_event_data {
+       AUTH_EVENT,
+       ASSOC_EVENT,
+       DEAUTH_RX_EVENT,
+       DEAUTH_TX_EVENT,
+};
+
+/**
+ * enum ieee80211_mlme_event_status - relevant when event type is %MLME_EVENT
+ * @MLME_SUCCESS: the MLME operation completed successfully.
+ * @MLME_DENIED: the MLME operation was denied by the peer.
+ * @MLME_TIMEOUT: the MLME operation timed out.
+ */
+enum ieee80211_mlme_event_status {
+       MLME_SUCCESS,
+       MLME_DENIED,
+       MLME_TIMEOUT,
+};
+
+/**
+ * struct ieee80211_mlme_event - data attached to an %MLME_EVENT
+ * @data: See &enum ieee80211_mlme_event_data
+ * @status: See &enum ieee80211_mlme_event_status
+ * @reason: the reason code if applicable
+ */
+struct ieee80211_mlme_event {
+       enum ieee80211_mlme_event_data data;
+       enum ieee80211_mlme_event_status status;
+       u16 reason;
+};
+
+/**
+ * struct ieee80211_event - event to be sent to the driver
+ * @type: The event itself. See &enum ieee80211_event_type.
+ * @rssi: relevant if &type is %RSSI_EVENT
+ * @mlme: relevant if &type is %MLME_EVENT
+ */
+struct ieee80211_event {
+       enum ieee80211_event_type type;
+       union {
+               struct ieee80211_rssi_event rssi;
+               struct ieee80211_mlme_event mlme;
+       } u;
+};
+
 /**
  * struct ieee80211_bss_conf - holds the BSS's changing parameters
  *
@@ -337,12 +406,15 @@ enum ieee80211_rssi_event {
  *     HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can
  *     only come from a beacon, but might not become valid until after
  *     association when a beacon is received (which is notified with the
- *     %BSS_CHANGED_DTIM flag.)
+ *     %BSS_CHANGED_DTIM flag.). See also sync_dtim_count important notice.
  * @sync_device_ts: the device timestamp corresponding to the sync_tsf,
  *     the driver/device can use this to calculate synchronisation
- *     (see @sync_tsf)
+ *     (see @sync_tsf). See also sync_dtim_count important notice.
  * @sync_dtim_count: Only valid when %IEEE80211_HW_TIMING_BEACON_ONLY
  *     is requested, see @sync_tsf/@sync_device_ts.
+ *     IMPORTANT: These three sync_* parameters would possibly be out of sync
+ *     by the time the driver will use them. The synchronized view is currently
+ *     guaranteed only in certain callbacks.
  * @beacon_int: beacon interval
  * @assoc_capability: capabilities taken from assoc resp
  * @basic_rates: bitmap of basic rates, each bit stands for an
@@ -1278,6 +1350,19 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
  */
 struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
 
+/**
+ * ieee80211_vif_to_wdev - return a wdev struct from a vif
+ * @vif: the vif to get the wdev for
+ *
+ * This can be used by mac80211 drivers with direct cfg80211 APIs
+ * (like the vendor commands) that needs to get the wdev for a vif.
+ *
+ * Note that this function may return %NULL if the given wdev isn't
+ * associated with a vif that the driver knows about (e.g. monitor
+ * or AP_VLAN interfaces.)
+ */
+struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
+
 /**
  * enum ieee80211_key_flags - key flags
  *
@@ -1472,7 +1557,8 @@ struct ieee80211_sta_rates {
  * @supp_rates: Bitmap of supported rates (per band)
  * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
  * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
- * @wme: indicates whether the STA supports QoS/WME.
+ * @wme: indicates whether the STA supports QoS/WME (if local devices does,
+ *     otherwise always false)
  * @drv_priv: data area for driver use, will always be aligned to
  *     sizeof(void *), size is determined in hw information.
  * @uapsd_queues: bitmap of queues configured for uapsd. Only valid
@@ -1488,6 +1574,7 @@ struct ieee80211_sta_rates {
  * @tdls: indicates whether the STA is a TDLS peer
  * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
  *     valid if the STA is a TDLS peer in the first place.
+ * @mfp: indicates whether the STA uses management frame protection or not.
  */
 struct ieee80211_sta {
        u32 supp_rates[IEEE80211_NUM_BANDS];
@@ -1504,6 +1591,7 @@ struct ieee80211_sta {
        struct ieee80211_sta_rates __rcu *rates;
        bool tdls;
        bool tdls_initiator;
+       bool mfp;
 
        /* must be last */
        u8 drv_priv[0] __aligned(sizeof(void *));
@@ -2844,8 +2932,9 @@ enum ieee80211_reconfig_type {
  * @set_bitrate_mask: Set a mask of rates to be used for rate control selection
  *     when transmitting a frame. Currently only legacy rates are handled.
  *     The callback can sleep.
- * @rssi_callback: Notify driver when the average RSSI goes above/below
- *     thresholds that were registered previously. The callback can sleep.
+ * @event_callback: Notify driver about any event in mac80211. See
+ *     &enum ieee80211_event_type for the different types.
+ *     The callback can sleep.
  *
  * @release_buffered_frames: Release buffered frames according to the given
  *     parameters. In the case where the driver buffers some frames for
@@ -3141,9 +3230,9 @@ struct ieee80211_ops {
        bool (*tx_frames_pending)(struct ieee80211_hw *hw);
        int (*set_bitrate_mask)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                                const struct cfg80211_bitrate_mask *mask);
-       void (*rssi_callback)(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif,
-                             enum ieee80211_rssi_event rssi_event);
+       void (*event_callback)(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif,
+                              const struct ieee80211_event *event);
 
        void (*allow_buffered_frames)(struct ieee80211_hw *hw,
                                      struct ieee80211_sta *sta,
@@ -4343,12 +4432,32 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
  *     haven't been re-added to the driver yet.
  * @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
  *     interfaces, even if they haven't been re-added to the driver yet.
+ * @IEEE80211_IFACE_ITER_ACTIVE: Iterate only active interfaces (netdev is up).
  */
 enum ieee80211_interface_iteration_flags {
        IEEE80211_IFACE_ITER_NORMAL     = 0,
        IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0),
+       IEEE80211_IFACE_ITER_ACTIVE     = BIT(1),
 };
 
+/**
+ * ieee80211_iterate_interfaces - iterate interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware and calls the callback for them. This includes active as well as
+ * inactive interfaces. This function allows the iterator function to sleep.
+ * Will iterate over a new interface during add_interface().
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
+                                 void (*iterator)(void *data, u8 *mac,
+                                                  struct ieee80211_vif *vif),
+                                 void *data);
+
 /**
  * ieee80211_iterate_active_interfaces - iterate active interfaces
  *
@@ -4364,11 +4473,16 @@ enum ieee80211_interface_iteration_flags {
  * @iterator: the iterator function to call
  * @data: first argument of the iterator function
  */
-void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
-                                        u32 iter_flags,
-                                        void (*iterator)(void *data, u8 *mac,
-                                               struct ieee80211_vif *vif),
-                                        void *data);
+static inline void
+ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
+                                   void (*iterator)(void *data, u8 *mac,
+                                                    struct ieee80211_vif *vif),
+                                   void *data)
+{
+       ieee80211_iterate_interfaces(hw,
+                                    iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
+                                    iterator, data);
+}
 
 /**
  * ieee80211_iterate_active_interfaces_atomic - iterate active interfaces
index 8506478117496c971f19ec07eb00625b4b847498..fb4e8a3d6229b1205bd14644900d4640a3ac40fa 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/af_ieee802154.h>
 #include <linux/ieee802154.h>
 #include <linux/skbuff.h>
+#include <linux/unaligned/memmove.h>
 
 #include <net/cfg802154.h>
 
@@ -233,9 +234,7 @@ struct ieee802154_ops {
  */
 static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
 {
-       __le64 tmp = (__force __le64)swab64p(be64_src);
-
-       memcpy(le64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+       __put_unaligned_memmove64(swab64p(be64_src), le64_dst);
 }
 
 /**
@@ -245,9 +244,7 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
  */
 static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
 {
-       __be64 tmp = (__force __be64)swab64p(le64_src);
-
-       memcpy(be64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+       __put_unaligned_memmove64(swab64p(le64_src), be64_dst);
 }
 
 /* Basic interface to register ieee802154 hwice */
index 6bbda34d5e59d030a1e2d69e93cda754d06ae5c0..b3a7751251b4cb9ce1a7fcb1d3999a63f4ff5074 100644 (file)
@@ -156,24 +156,7 @@ static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, _
 
 static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey)
 {
-       struct neigh_hash_table *nht;
-       const u32 *p32 = pkey;
-       struct neighbour *n;
-       u32 hash_val;
-
-       nht = rcu_dereference_bh(nd_tbl.nht);
-       hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
-       for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
-            n != NULL;
-            n = rcu_dereference_bh(n->next)) {
-               u32 *n32 = (u32 *) n->primary_key;
-               if (n->dev == dev &&
-                   ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
-                    (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0)
-                       return n;
-       }
-
-       return NULL;
+       return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev);
 }
 
 static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey)
index 76f708486aaec76031a24ee5ff1d02f126185304..bd33e66f49aad086784b2dd66cb054ea18e73c7a 100644 (file)
@@ -42,6 +42,7 @@ enum {
        NEIGH_VAR_MCAST_PROBES,
        NEIGH_VAR_UCAST_PROBES,
        NEIGH_VAR_APP_PROBES,
+       NEIGH_VAR_MCAST_REPROBES,
        NEIGH_VAR_RETRANS_TIME,
        NEIGH_VAR_BASE_REACHABLE_TIME,
        NEIGH_VAR_DELAY_PROBE_TIME,
@@ -65,9 +66,7 @@ enum {
 };
 
 struct neigh_parms {
-#ifdef CONFIG_NET_NS
-       struct net *net;
-#endif
+       possible_net_t net;
        struct net_device *dev;
        struct list_head list;
        int     (*neigh_setup)(struct neighbour *);
@@ -167,9 +166,7 @@ struct neigh_ops {
 
 struct pneigh_entry {
        struct pneigh_entry     *next;
-#ifdef CONFIG_NET_NS
-       struct net              *net;
-#endif
+       possible_net_t          net;
        struct net_device       *dev;
        u8                      flags;
        u8                      key[0];
@@ -193,9 +190,11 @@ struct neigh_table {
        int                     family;
        int                     entry_size;
        int                     key_len;
+       __be16                  protocol;
        __u32                   (*hash)(const void *pkey,
                                        const struct net_device *dev,
                                        __u32 *hash_rnd);
+       bool                    (*key_eq)(const struct neighbour *, const void *pkey);
        int                     (*constructor)(struct neighbour *);
        int                     (*pconstructor)(struct pneigh_entry *);
        void                    (*pdestructor)(struct pneigh_entry *);
@@ -224,6 +223,7 @@ enum {
        NEIGH_ND_TABLE = 1,
        NEIGH_DN_TABLE = 2,
        NEIGH_NR_TABLES,
+       NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
 };
 
 static inline int neigh_parms_family(struct neigh_parms *p)
@@ -246,6 +246,57 @@ static inline void *neighbour_priv(const struct neighbour *n)
 #define NEIGH_UPDATE_F_ISROUTER                        0x40000000
 #define NEIGH_UPDATE_F_ADMIN                   0x80000000
 
+
+static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
+{
+       return *(const u16 *)n->primary_key == *(const u16 *)pkey;
+}
+
+static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
+{
+       return *(const u32 *)n->primary_key == *(const u32 *)pkey;
+}
+
+static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
+{
+       const u32 *n32 = (const u32 *)n->primary_key;
+       const u32 *p32 = pkey;
+
+       return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
+               (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
+}
+
+static inline struct neighbour *___neigh_lookup_noref(
+       struct neigh_table *tbl,
+       bool (*key_eq)(const struct neighbour *n, const void *pkey),
+       __u32 (*hash)(const void *pkey,
+                     const struct net_device *dev,
+                     __u32 *hash_rnd),
+       const void *pkey,
+       struct net_device *dev)
+{
+       struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
+       struct neighbour *n;
+       u32 hash_val;
+
+       hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+       for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+            n != NULL;
+            n = rcu_dereference_bh(n->next)) {
+               if (n->dev == dev && key_eq(n, pkey))
+                       return n;
+       }
+
+       return NULL;
+}
+
+static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
+                                                    const void *pkey,
+                                                    struct net_device *dev)
+{
+       return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
+}
+
 void neigh_table_init(int index, struct neigh_table *tbl);
 int neigh_table_clear(int index, struct neigh_table *tbl);
 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
@@ -268,7 +319,6 @@ void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
-int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
                                                u8 *lladdr, void *saddr,
@@ -306,6 +356,7 @@ void neigh_for_each(struct neigh_table *tbl,
                    void (*cb)(struct neighbour *, void *), void *cookie);
 void __neigh_for_each_release(struct neigh_table *tbl,
                              int (*cb)(struct neighbour *));
+int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
 void pneigh_for_each(struct neigh_table *tbl,
                     void (*cb)(struct pneigh_entry *));
 
@@ -459,4 +510,6 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
                memcpy(dst, n->ha, dev->addr_len);
        } while (read_seqretry(&n->ha_lock, seq));
 }
+
+
 #endif
index 36faf4990c4b6f2604fd2b00178c941d75f540a8..f733656404de0ea5cba33c83a641e37dc4e4d5e5 100644 (file)
@@ -26,6 +26,7 @@
 #endif
 #include <net/netns/nftables.h>
 #include <net/netns/xfrm.h>
+#include <net/netns/mpls.h>
 #include <linux/ns_common.h>
 
 struct user_namespace;
@@ -48,13 +49,10 @@ struct net {
        atomic_t                count;          /* To decided when the network
                                                 *  namespace should be shut down.
                                                 */
-#ifdef NETNS_REFCNT_DEBUG
-       atomic_t                use_count;      /* To track references we
-                                                * destroy on demand
-                                                */
-#endif
        spinlock_t              rules_mod_lock;
 
+       atomic64_t              cookie_gen;
+
        struct list_head        list;           /* list of network namespaces */
        struct list_head        cleanup_list;   /* namespaces on death row */
        struct list_head        exit_list;      /* Use only net_mutex */
@@ -129,6 +127,9 @@ struct net {
 #endif
 #if IS_ENABLED(CONFIG_IP_VS)
        struct netns_ipvs       *ipvs;
+#endif
+#if IS_ENABLED(CONFIG_MPLS)
+       struct netns_mpls       mpls;
 #endif
        struct sock             *diag_nlsk;
        atomic_t                fnhe_genid;
@@ -230,48 +231,27 @@ int net_eq(const struct net *net1, const struct net *net2)
 #endif
 
 
-#ifdef NETNS_REFCNT_DEBUG
-static inline struct net *hold_net(struct net *net)
-{
-       if (net)
-               atomic_inc(&net->use_count);
-       return net;
-}
-
-static inline void release_net(struct net *net)
-{
-       if (net)
-               atomic_dec(&net->use_count);
-}
-#else
-static inline struct net *hold_net(struct net *net)
-{
-       return net;
-}
-
-static inline void release_net(struct net *net)
-{
-}
-#endif
-
+typedef struct {
 #ifdef CONFIG_NET_NS
+       struct net *net;
+#endif
+} possible_net_t;
 
-static inline void write_pnet(struct net **pnet, struct net *net)
+static inline void write_pnet(possible_net_t *pnet, struct net *net)
 {
-       *pnet = net;
+#ifdef CONFIG_NET_NS
+       pnet->net = net;
+#endif
 }
 
-static inline struct net *read_pnet(struct net * const *pnet)
+static inline struct net *read_pnet(const possible_net_t *pnet)
 {
-       return *pnet;
-}
-
+#ifdef CONFIG_NET_NS
+       return pnet->net;
 #else
-
-#define write_pnet(pnet, net)  do { (void)(net);} while (0)
-#define read_pnet(pnet)                (&init_net)
-
+       return &init_net;
 #endif
+}
 
 #define for_each_net(VAR)                              \
        list_for_each_entry(VAR, &net_namespace_list, list)
index 03e928a552290f57b050c4ce7dbe6592d02102e4..77862c3645f07000070f94e4838ca8b8cd792110 100644 (file)
@@ -5,18 +5,14 @@
 #include <net/ip.h>
 #include <net/icmp.h>
 
-static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
-{
-       icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
-}
-
+void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
 void nf_send_reset(struct sk_buff *oldskb, int hook);
 
 const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
                                             struct tcphdr *_oth, int hook);
 struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
                                  const struct sk_buff *oldskb,
-                                 __be16 protocol, int ttl);
+                                 __u8 protocol, int ttl);
 void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
                             const struct tcphdr *oth);
 
index 23216d48abf9ddb79aa188cbf55e0a032e4f9e78..0ea4fa37db16d87cf5ed9b72feb8c50d24cb05f5 100644 (file)
@@ -3,15 +3,8 @@
 
 #include <linux/icmpv6.h>
 
-static inline void
-nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
-            unsigned int hooknum)
-{
-       if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
-               skb_in->dev = net->loopback_dev;
-
-       icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
-}
+void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
+                     unsigned int hooknum);
 
 void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
 
@@ -20,7 +13,7 @@ const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
                                              unsigned int *otcplen, int hook);
 struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
                                     const struct sk_buff *oldskb,
-                                    __be16 protocol, int hoplimit);
+                                    __u8 protocol, int hoplimit);
 void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
                              const struct sk_buff *oldskb,
                              const struct tcphdr *oth, unsigned int otcplen);
index 74f271a172dd181ad0dd521395e5480f29250cff..095433b8a8b03dec4b99057dd7165c9dc6d7846f 100644 (file)
@@ -95,9 +95,8 @@ struct nf_conn {
        /* Timer function; drops refcnt when it goes off. */
        struct timer_list timeout;
 
-#ifdef CONFIG_NET_NS
-       struct net *ct_net;
-#endif
+       possible_net_t ct_net;
+
        /* all members below initialized via memset */
        u8 __nfct_init_offset[0];
 
index 340c013795a49c9fcd91469ccb04a2be88f332fe..a3127325f624b9afd8cf160c63358e557f5cdf40 100644 (file)
@@ -44,40 +44,32 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
                                  unsigned int hooknum);
 
 unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
+                           const struct nf_hook_state *state,
                            unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                     struct sk_buff *skb,
-                                                    const struct net_device *in,
-                                                    const struct net_device *out,
+                                                    const struct nf_hook_state *state,
                                                     struct nf_conn *ct));
 
 unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                            const struct net_device *in,
-                            const struct net_device *out,
+                            const struct nf_hook_state *state,
                             unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                      struct sk_buff *skb,
-                                                     const struct net_device *in,
-                                                     const struct net_device *out,
+                                                     const struct nf_hook_state *state,
                                                      struct nf_conn *ct));
 
 unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
                                  struct sk_buff *skb,
-                                 const struct net_device *in,
-                                 const struct net_device *out,
+                                 const struct nf_hook_state *state,
                                  unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                           struct sk_buff *skb,
-                                                          const struct net_device *in,
-                                                          const struct net_device *out,
+                                                          const struct nf_hook_state *state,
                                                           struct nf_conn *ct));
 
 unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
+                           const struct nf_hook_state *state,
                            unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                     struct sk_buff *skb,
-                                                    const struct net_device *in,
-                                                    const struct net_device *out,
+                                                    const struct nf_hook_state *state,
                                                     struct nf_conn *ct));
 
 int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
@@ -85,40 +77,32 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
                                    unsigned int hooknum, unsigned int hdrlen);
 
 unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
+                           const struct nf_hook_state *state,
                            unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                     struct sk_buff *skb,
-                                                    const struct net_device *in,
-                                                    const struct net_device *out,
+                                                    const struct nf_hook_state *state,
                                                     struct nf_conn *ct));
 
 unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                            const struct net_device *in,
-                            const struct net_device *out,
+                            const struct nf_hook_state *state,
                             unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                      struct sk_buff *skb,
-                                                     const struct net_device *in,
-                                                     const struct net_device *out,
+                                                     const struct nf_hook_state *state,
                                                      struct nf_conn *ct));
 
 unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
                                  struct sk_buff *skb,
-                                 const struct net_device *in,
-                                 const struct net_device *out,
+                                 const struct nf_hook_state *state,
                                  unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                           struct sk_buff *skb,
-                                                          const struct net_device *in,
-                                                          const struct net_device *out,
+                                                          const struct nf_hook_state *state,
                                                           struct nf_conn *ct));
 
 unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
+                           const struct nf_hook_state *state,
                            unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                                     struct sk_buff *skb,
-                                                    const struct net_device *in,
-                                                    const struct net_device *out,
+                                                    const struct nf_hook_state *state,
                                                     struct nf_conn *ct));
 
 #endif /* _NF_NAT_L3PROTO_H */
index 84a53d7803069dd15bb5bb8575dffa1a900d8ea5..d81d584157e11f5d80013de967ee341fa512baa0 100644 (file)
@@ -12,12 +12,8 @@ struct nf_queue_entry {
        unsigned int            id;
 
        struct nf_hook_ops      *elem;
-       u_int8_t                pf;
+       struct nf_hook_state    state;
        u16                     size; /* sizeof(entry) + saved route keys */
-       unsigned int            hook;
-       struct net_device       *indev;
-       struct net_device       *outdev;
-       int                     (*okfn)(struct sk_buff *);
 
        /* extra space to store route keys */
 };
index decb9a095ae7c4df5ace79f366762138c223958c..8049819803939043db30ea182b715341f0cd9587 100644 (file)
@@ -26,12 +26,11 @@ struct nft_pktinfo {
 static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
                                   const struct nf_hook_ops *ops,
                                   struct sk_buff *skb,
-                                  const struct net_device *in,
-                                  const struct net_device *out)
+                                  const struct nf_hook_state *state)
 {
        pkt->skb = skb;
-       pkt->in = pkt->xt.in = in;
-       pkt->out = pkt->xt.out = out;
+       pkt->in = pkt->xt.in = state->in;
+       pkt->out = pkt->xt.out = state->out;
        pkt->ops = ops;
        pkt->xt.hooknum = ops->hooknum;
        pkt->xt.family = ops->pf;
@@ -138,19 +137,12 @@ struct nft_userdata {
 /**
  *     struct nft_set_elem - generic representation of set elements
  *
- *     @cookie: implementation specific element cookie
  *     @key: element key
- *     @data: element data (maps only)
- *     @flags: element flags (end of interval)
- *
- *     The cookie can be used to store a handle to the element for subsequent
- *     removal.
+ *     @priv: element private data and extensions
  */
 struct nft_set_elem {
-       void                    *cookie;
        struct nft_data         key;
-       struct nft_data         data;
-       u32                     flags;
+       void                    *priv;
 };
 
 struct nft_set;
@@ -202,11 +194,15 @@ struct nft_set_estimate {
        enum nft_set_class      class;
 };
 
+struct nft_set_ext;
+
 /**
  *     struct nft_set_ops - nf_tables set operations
  *
  *     @lookup: look up an element within the set
  *     @insert: insert new element into set
+ *     @activate: activate new element in the next generation
+ *     @deactivate: deactivate element in the next generation
  *     @remove: remove element from set
  *     @walk: iterate over all set elemeennts
  *     @privsize: function to return size of set private data
@@ -214,16 +210,19 @@ struct nft_set_estimate {
  *     @destroy: destroy private data of set instance
  *     @list: nf_tables_set_ops list node
  *     @owner: module reference
+ *     @elemsize: element private size
  *     @features: features supported by the implementation
  */
 struct nft_set_ops {
        bool                            (*lookup)(const struct nft_set *set,
                                                  const struct nft_data *key,
-                                                 struct nft_data *data);
-       int                             (*get)(const struct nft_set *set,
-                                              struct nft_set_elem *elem);
+                                                 const struct nft_set_ext **ext);
        int                             (*insert)(const struct nft_set *set,
                                                  const struct nft_set_elem *elem);
+       void                            (*activate)(const struct nft_set *set,
+                                                   const struct nft_set_elem *elem);
+       void *                          (*deactivate)(const struct nft_set *set,
+                                                     const struct nft_set_elem *elem);
        void                            (*remove)(const struct nft_set *set,
                                                  const struct nft_set_elem *elem);
        void                            (*walk)(const struct nft_ctx *ctx,
@@ -241,6 +240,7 @@ struct nft_set_ops {
 
        struct list_head                list;
        struct module                   *owner;
+       unsigned int                    elemsize;
        u32                             features;
 };
 
@@ -259,6 +259,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
  *     @nelems: number of elements
  *     @policy: set parameterization (see enum nft_set_policies)
  *     @ops: set ops
+ *     @pnet: network namespace
  *     @flags: set flags
  *     @klen: key length
  *     @dlen: data length
@@ -275,6 +276,7 @@ struct nft_set {
        u16                             policy;
        /* runtime data below here */
        const struct nft_set_ops        *ops ____cacheline_aligned;
+       possible_net_t                  pnet;
        u16                             flags;
        u8                              klen;
        u8                              dlen;
@@ -311,6 +313,121 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_binding *binding);
 
+/**
+ *     enum nft_set_extensions - set extension type IDs
+ *
+ *     @NFT_SET_EXT_KEY: element key
+ *     @NFT_SET_EXT_DATA: mapping data
+ *     @NFT_SET_EXT_FLAGS: element flags
+ *     @NFT_SET_EXT_NUM: number of extension types
+ */
+enum nft_set_extensions {
+       NFT_SET_EXT_KEY,
+       NFT_SET_EXT_DATA,
+       NFT_SET_EXT_FLAGS,
+       NFT_SET_EXT_NUM
+};
+
+/**
+ *     struct nft_set_ext_type - set extension type
+ *
+ *     @len: fixed part length of the extension
+ *     @align: alignment requirements of the extension
+ */
+struct nft_set_ext_type {
+       u8      len;
+       u8      align;
+};
+
+extern const struct nft_set_ext_type nft_set_ext_types[];
+
+/**
+ *     struct nft_set_ext_tmpl - set extension template
+ *
+ *     @len: length of extension area
+ *     @offset: offsets of individual extension types
+ */
+struct nft_set_ext_tmpl {
+       u16     len;
+       u8      offset[NFT_SET_EXT_NUM];
+};
+
+/**
+ *     struct nft_set_ext - set extensions
+ *
+ *     @genmask: generation mask
+ *     @offset: offsets of individual extension types
+ *     @data: beginning of extension data
+ */
+struct nft_set_ext {
+       u8      genmask;
+       u8      offset[NFT_SET_EXT_NUM];
+       char    data[0];
+};
+
+static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
+{
+       memset(tmpl, 0, sizeof(*tmpl));
+       tmpl->len = sizeof(struct nft_set_ext);
+}
+
+static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
+                                         unsigned int len)
+{
+       tmpl->len        = ALIGN(tmpl->len, nft_set_ext_types[id].align);
+       BUG_ON(tmpl->len > U8_MAX);
+       tmpl->offset[id] = tmpl->len;
+       tmpl->len       += nft_set_ext_types[id].len + len;
+}
+
+static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
+{
+       nft_set_ext_add_length(tmpl, id, 0);
+}
+
+static inline void nft_set_ext_init(struct nft_set_ext *ext,
+                                   const struct nft_set_ext_tmpl *tmpl)
+{
+       memcpy(ext->offset, tmpl->offset, sizeof(ext->offset));
+}
+
+static inline bool __nft_set_ext_exists(const struct nft_set_ext *ext, u8 id)
+{
+       return !!ext->offset[id];
+}
+
+static inline bool nft_set_ext_exists(const struct nft_set_ext *ext, u8 id)
+{
+       return ext && __nft_set_ext_exists(ext, id);
+}
+
+static inline void *nft_set_ext(const struct nft_set_ext *ext, u8 id)
+{
+       return (void *)ext + ext->offset[id];
+}
+
+static inline struct nft_data *nft_set_ext_key(const struct nft_set_ext *ext)
+{
+       return nft_set_ext(ext, NFT_SET_EXT_KEY);
+}
+
+static inline struct nft_data *nft_set_ext_data(const struct nft_set_ext *ext)
+{
+       return nft_set_ext(ext, NFT_SET_EXT_DATA);
+}
+
+static inline u8 *nft_set_ext_flags(const struct nft_set_ext *ext)
+{
+       return nft_set_ext(ext, NFT_SET_EXT_FLAGS);
+}
+
+static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
+                                                  void *elem)
+{
+       return elem + set->ops->elemsize;
+}
+
+void nft_set_elem_destroy(const struct nft_set *set, void *elem);
 
 /**
  *     struct nft_expr_type - nf_tables expression type
@@ -409,74 +526,6 @@ struct nft_rule {
                __attribute__((aligned(__alignof__(struct nft_expr))));
 };
 
-/**
- *     struct nft_trans - nf_tables object update in transaction
- *
- *     @list: used internally
- *     @msg_type: message type
- *     @ctx: transaction context
- *     @data: internal information related to the transaction
- */
-struct nft_trans {
-       struct list_head                list;
-       int                             msg_type;
-       struct nft_ctx                  ctx;
-       char                            data[0];
-};
-
-struct nft_trans_rule {
-       struct nft_rule                 *rule;
-};
-
-#define nft_trans_rule(trans)  \
-       (((struct nft_trans_rule *)trans->data)->rule)
-
-struct nft_trans_set {
-       struct nft_set  *set;
-       u32             set_id;
-};
-
-#define nft_trans_set(trans)   \
-       (((struct nft_trans_set *)trans->data)->set)
-#define nft_trans_set_id(trans)        \
-       (((struct nft_trans_set *)trans->data)->set_id)
-
-struct nft_trans_chain {
-       bool            update;
-       char            name[NFT_CHAIN_MAXNAMELEN];
-       struct nft_stats __percpu *stats;
-       u8              policy;
-};
-
-#define nft_trans_chain_update(trans)  \
-       (((struct nft_trans_chain *)trans->data)->update)
-#define nft_trans_chain_name(trans)    \
-       (((struct nft_trans_chain *)trans->data)->name)
-#define nft_trans_chain_stats(trans)   \
-       (((struct nft_trans_chain *)trans->data)->stats)
-#define nft_trans_chain_policy(trans)  \
-       (((struct nft_trans_chain *)trans->data)->policy)
-
-struct nft_trans_table {
-       bool            update;
-       bool            enable;
-};
-
-#define nft_trans_table_update(trans)  \
-       (((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_enable(trans)  \
-       (((struct nft_trans_table *)trans->data)->enable)
-
-struct nft_trans_elem {
-       struct nft_set          *set;
-       struct nft_set_elem     elem;
-};
-
-#define nft_trans_elem_set(trans)      \
-       (((struct nft_trans_elem *)trans->data)->set)
-#define nft_trans_elem(trans)  \
-       (((struct nft_trans_elem *)trans->data)->elem)
-
 static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
 {
        return (struct nft_expr *)&rule->data[0];
@@ -517,7 +566,6 @@ enum nft_chain_flags {
  *
  *     @rules: list of rules in the chain
  *     @list: used internally
- *     @net: net namespace that this chain belongs to
  *     @table: table that this chain belongs to
  *     @handle: chain handle
  *     @use: number of jump references to this chain
@@ -528,7 +576,6 @@ enum nft_chain_flags {
 struct nft_chain {
        struct list_head                rules;
        struct list_head                list;
-       struct net                      *net;
        struct nft_table                *table;
        u64                             handle;
        u32                             use;
@@ -544,6 +591,25 @@ enum nft_chain_type {
        NFT_CHAIN_T_MAX
 };
 
+/**
+ *     struct nf_chain_type - nf_tables chain type info
+ *
+ *     @name: name of the type
+ *     @type: numeric identifier
+ *     @family: address family
+ *     @owner: module owner
+ *     @hook_mask: mask of valid hooks
+ *     @hooks: hookfn overrides
+ */
+struct nf_chain_type {
+       const char                      *name;
+       enum nft_chain_type             type;
+       int                             family;
+       struct module                   *owner;
+       unsigned int                    hook_mask;
+       nf_hookfn                       *hooks[NF_MAX_HOOKS];
+};
+
 int nft_chain_validate_dependency(const struct nft_chain *chain,
                                  enum nft_chain_type type);
 int nft_chain_validate_hooks(const struct nft_chain *chain,
@@ -561,6 +627,7 @@ struct nft_stats {
  *     struct nft_base_chain - nf_tables base chain
  *
  *     @ops: netfilter hook ops
+ *     @pnet: net namespace that this chain belongs to
  *     @type: chain type
  *     @policy: default policy
  *     @stats: per-cpu chain stats
@@ -568,6 +635,7 @@ struct nft_stats {
  */
 struct nft_base_chain {
        struct nf_hook_ops              ops[NFT_HOOK_OPS_MAX];
+       possible_net_t                  pnet;
        const struct nf_chain_type      *type;
        u8                              policy;
        struct nft_stats __percpu       *stats;
@@ -600,7 +668,7 @@ struct nft_table {
        u64                             hgenerator;
        u32                             use;
        u16                             flags;
-       char                            name[];
+       char                            name[NFT_TABLE_MAXNAMELEN];
 };
 
 /**
@@ -630,25 +698,6 @@ struct nft_af_info {
 int nft_register_afinfo(struct net *, struct nft_af_info *);
 void nft_unregister_afinfo(struct nft_af_info *);
 
-/**
- *     struct nf_chain_type - nf_tables chain type info
- *
- *     @name: name of the type
- *     @type: numeric identifier
- *     @family: address family
- *     @owner: module owner
- *     @hook_mask: mask of valid hooks
- *     @hooks: hookfn overrides
- */
-struct nf_chain_type {
-       const char                      *name;
-       enum nft_chain_type             type;
-       int                             family;
-       struct module                   *owner;
-       unsigned int                    hook_mask;
-       nf_hookfn                       *hooks[NF_MAX_HOOKS];
-};
-
 int nft_register_chain_type(const struct nf_chain_type *);
 void nft_unregister_chain_type(const struct nf_chain_type *);
 
@@ -673,4 +722,116 @@ void nft_unregister_expr(struct nft_expr_type *);
 #define MODULE_ALIAS_NFT_SET() \
        MODULE_ALIAS("nft-set")
 
+/*
+ * The gencursor defines two generations, the currently active and the
+ * next one. Objects contain a bitmask of 2 bits specifying the generations
+ * they're active in. A set bit means they're inactive in the generation
+ * represented by that bit.
+ *
+ * New objects start out as inactive in the current and active in the
+ * next generation. When committing the ruleset the bitmask is cleared,
+ * meaning they're active in all generations. When removing an object,
+ * it is set inactive in the next generation. After committing the ruleset,
+ * the objects are removed.
+ */
+static inline unsigned int nft_gencursor_next(const struct net *net)
+{
+       return net->nft.gencursor + 1 == 1 ? 1 : 0;
+}
+
+static inline u8 nft_genmask_next(const struct net *net)
+{
+       return 1 << nft_gencursor_next(net);
+}
+
+static inline u8 nft_genmask_cur(const struct net *net)
+{
+       /* Use ACCESS_ONCE() to prevent refetching the value for atomicity */
+       return 1 << ACCESS_ONCE(net->nft.gencursor);
+}
+
+/*
+ * Set element transaction helpers
+ */
+
+static inline bool nft_set_elem_active(const struct nft_set_ext *ext,
+                                      u8 genmask)
+{
+       return !(ext->genmask & genmask);
+}
+
+static inline void nft_set_elem_change_active(const struct nft_set *set,
+                                             struct nft_set_ext *ext)
+{
+       ext->genmask ^= nft_genmask_next(read_pnet(&set->pnet));
+}
+
+/**
+ *     struct nft_trans - nf_tables object update in transaction
+ *
+ *     @list: used internally
+ *     @msg_type: message type
+ *     @ctx: transaction context
+ *     @data: internal information related to the transaction
+ */
+struct nft_trans {
+       struct list_head                list;
+       int                             msg_type;
+       struct nft_ctx                  ctx;
+       char                            data[0];
+};
+
+struct nft_trans_rule {
+       struct nft_rule                 *rule;
+};
+
+#define nft_trans_rule(trans)  \
+       (((struct nft_trans_rule *)trans->data)->rule)
+
+struct nft_trans_set {
+       struct nft_set                  *set;
+       u32                             set_id;
+};
+
+#define nft_trans_set(trans)   \
+       (((struct nft_trans_set *)trans->data)->set)
+#define nft_trans_set_id(trans)        \
+       (((struct nft_trans_set *)trans->data)->set_id)
+
+struct nft_trans_chain {
+       bool                            update;
+       char                            name[NFT_CHAIN_MAXNAMELEN];
+       struct nft_stats __percpu       *stats;
+       u8                              policy;
+};
+
+#define nft_trans_chain_update(trans)  \
+       (((struct nft_trans_chain *)trans->data)->update)
+#define nft_trans_chain_name(trans)    \
+       (((struct nft_trans_chain *)trans->data)->name)
+#define nft_trans_chain_stats(trans)   \
+       (((struct nft_trans_chain *)trans->data)->stats)
+#define nft_trans_chain_policy(trans)  \
+       (((struct nft_trans_chain *)trans->data)->policy)
+
+struct nft_trans_table {
+       bool                            update;
+       bool                            enable;
+};
+
+#define nft_trans_table_update(trans)  \
+       (((struct nft_trans_table *)trans->data)->update)
+#define nft_trans_table_enable(trans)  \
+       (((struct nft_trans_table *)trans->data)->enable)
+
+struct nft_trans_elem {
+       struct nft_set                  *set;
+       struct nft_set_elem             elem;
+};
+
+#define nft_trans_elem_set(trans)      \
+       (((struct nft_trans_elem *)trans->data)->set)
+#define nft_trans_elem(trans)  \
+       (((struct nft_trans_elem *)trans->data)->elem)
+
 #endif /* _NET_NF_TABLES_H */
index cba143fbd2e4fca9f5d0280dda035215ebd922fb..2df7f96902ee96edaa9a85f7c96c254220b5e12d 100644 (file)
@@ -8,12 +8,11 @@ static inline void
 nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
                     const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
-                    const struct net_device *in,
-                    const struct net_device *out)
+                    const struct nf_hook_state *state)
 {
        struct iphdr *ip;
 
-       nft_set_pktinfo(pkt, ops, skb, in, out);
+       nft_set_pktinfo(pkt, ops, skb, state);
 
        ip = ip_hdr(pkt->skb);
        pkt->tprot = ip->protocol;
index 74d97613765801512e523bab6e40b6e2767d982d..97db2e3a5e657c4b67fdbcfd9b609aea390e1ec0 100644 (file)
@@ -8,13 +8,12 @@ static inline int
 nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
                     const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
-                    const struct net_device *in,
-                    const struct net_device *out)
+                    const struct nf_hook_state *state)
 {
        int protohdr, thoff = 0;
        unsigned short frag_off;
 
-       nft_set_pktinfo(pkt, ops, skb, in, out);
+       nft_set_pktinfo(pkt, ops, skb, state);
 
        protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
        /* If malformed, drop it */
index e010ee8da41ded34ae6e407cfab40734031b73aa..2a5dbcc90d1cafc950a4eafd3fd18b4d1e342d47 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/netlink.h>
 #include <linux/jiffies.h>
+#include <linux/in6.h>
 
 /* ========================================================================
  *         Netlink Messages and Attributes Interface (As Seen On TV)
  *   nla_put_string(skb, type, str)    add string attribute to skb
  *   nla_put_flag(skb, type)           add flag attribute to skb
  *   nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
+ *   nla_put_in_addr(skb, type, addr)  add IPv4 address attribute to skb
+ *   nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb
  *
  * Nested Attributes Construction:
  *   nla_nest_start(skb, type)         start a nested attribute
@@ -956,6 +959,32 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
        return nla_put(skb, attrtype, sizeof(u64), &tmp);
 }
 
+/**
+ * nla_put_in_addr - Add an IPv4 address netlink attribute to a socket
+ * buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @addr: IPv4 address
+ */
+static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
+                                 __be32 addr)
+{
+       return nla_put_be32(skb, attrtype, addr);
+}
+
+/**
+ * nla_put_in6_addr - Add an IPv6 address netlink attribute to a socket
+ * buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @addr: IPv6 address
+ */
+static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
+                                  const struct in6_addr *addr)
+{
+       return nla_put(skb, attrtype, sizeof(*addr), addr);
+}
+
 /**
  * nla_get_u32 - return payload of u32 attribute
  * @nla: u32 netlink attribute
@@ -1098,6 +1127,27 @@ static inline unsigned long nla_get_msecs(const struct nlattr *nla)
        return msecs_to_jiffies((unsigned long) msecs);
 }
 
+/**
+ * nla_get_in_addr - return payload of IPv4 address attribute
+ * @nla: IPv4 address netlink attribute
+ */
+static inline __be32 nla_get_in_addr(const struct nlattr *nla)
+{
+       return *(__be32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_in6_addr - return payload of IPv6 address attribute
+ * @nla: IPv6 address netlink attribute
+ */
+static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla)
+{
+       struct in6_addr tmp;
+
+       nla_memcpy(&tmp, nla, sizeof(tmp));
+       return tmp;
+}
+
 /**
  * nla_nest_start - Start a new level of nested attributes
  * @skb: socket buffer to add attributes to
index c06ac58ca107c1a11f9d3a479efe86fe3cc9a4dc..69a6715d9f3fd16c9a6e90f9be403ba0cba1f022 100644 (file)
@@ -5,7 +5,7 @@
 
 struct net;
 
-static inline unsigned int net_hash_mix(struct net *net)
+static inline u32 net_hash_mix(const struct net *net)
 {
 #ifdef CONFIG_NET_NS
        /*
@@ -13,7 +13,7 @@ static inline unsigned int net_hash_mix(struct net *net)
         * always zeroed
         */
 
-       return (unsigned)(((unsigned long)net) >> L1_CACHE_SHIFT);
+       return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
 #else
        return 0;
 #endif
index dbe225478adb08a31cc4e6ee798a6edf15a5d282..614a49be68a92897525ce9a1b5a7afd458faf126 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/uidgid.h>
 #include <net/inet_frag.h>
+#include <linux/rcupdate.h>
 
 struct tcpm_hash_bucket;
 struct ctl_table_header;
@@ -38,21 +39,21 @@ struct netns_ipv4 {
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        struct fib_rules_ops    *rules_ops;
        bool                    fib_has_custom_rules;
-       struct fib_table        *fib_local;
-       struct fib_table        *fib_main;
-       struct fib_table        *fib_default;
+       struct fib_table __rcu  *fib_local;
+       struct fib_table __rcu  *fib_main;
+       struct fib_table __rcu  *fib_default;
 #endif
 #ifdef CONFIG_IP_ROUTE_CLASSID
        int                     fib_num_tclassid_users;
 #endif
        struct hlist_head       *fib_table_hash;
+       bool                    fib_offload_disabled;
        struct sock             *fibnl;
 
        struct sock  * __percpu *icmp_sk;
+       struct sock             *mc_autojoin_sk;
 
        struct inet_peer_base   *peers;
-       struct tcpm_hash_bucket *tcp_metrics_hash;
-       unsigned int            tcp_metrics_hash_log;
        struct sock  * __percpu *tcp_sk;
        struct netns_frags      frags;
 #ifdef CONFIG_NETFILTER
@@ -84,6 +85,8 @@ struct netns_ipv4 {
        int sysctl_tcp_fwmark_accept;
        int sysctl_tcp_mtu_probing;
        int sysctl_tcp_base_mss;
+       int sysctl_tcp_probe_threshold;
+       u32 sysctl_tcp_probe_interval;
 
        struct ping_group_range ping_group_range;
 
index 69ae41f2098c159548a9e6ee6b432eabfa369785..d2527bf81142e28508e4d415353d8a4ecf8c609e 100644 (file)
@@ -32,6 +32,8 @@ struct netns_sysctl_ipv6 {
        int icmpv6_time;
        int anycast_src_echo_reply;
        int fwmark_reflect;
+       int idgen_retries;
+       int idgen_delay;
 };
 
 struct netns_ipv6 {
@@ -67,6 +69,7 @@ struct netns_ipv6 {
        struct sock             *ndisc_sk;
        struct sock             *tcp_sk;
        struct sock             *igmp_sk;
+       struct sock             *mc_autojoin_sk;
 #ifdef CONFIG_IPV6_MROUTE
 #ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
        struct mr6_table        *mrt6;
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
new file mode 100644 (file)
index 0000000..d292036
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * mpls in net namespaces
+ */
+
+#ifndef __NETNS_MPLS_H__
+#define __NETNS_MPLS_H__
+
+struct mpls_route;
+struct ctl_table_header;
+
+struct netns_mpls {
+       size_t platform_labels;
+       struct mpls_route __rcu * __rcu *platform_label;
+       struct ctl_table_header *ctl;
+};
+
+#endif /* __NETNS_MPLS_H__ */
index c24060ee411e9e15a91a0fc5bd189eef8425869b..4d6597ad6067a838f0ab32ec5bb2ab2cbd0d070d 100644 (file)
@@ -9,6 +9,7 @@ struct ebt_table;
 struct netns_xt {
        struct list_head tables[NFPROTO_NUMPROTO];
        bool notrack_deprecated_warning;
+       bool clusterip_deprecated_warning;
 #if defined(CONFIG_BRIDGE_NF_EBTABLES) || \
     defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE)
        struct ebt_table *broute_table;
index cc16d413f681c077f743547af38ab8d3293a0007..ac80cb45e63004ef5e12383002721e9a4634971c 100644 (file)
@@ -75,12 +75,11 @@ void ping_err(struct sk_buff *skb, int offset, u32 info);
 int  ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
                  struct sk_buff *);
 
-int  ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                 size_t len, int noblock, int flags, int *addr_len);
+int  ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+                 int flags, int *addr_len);
 int  ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
                         void *user_icmph, size_t icmph_len);
-int  ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                    size_t len);
+int  ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
 int  ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 bool ping_rcv(struct sk_buff *skb);
 
index 7f830ff67f08c318717e809221d1586f17948185..fe41f3ceb008d767d594de6a042393ba463b509b 100644 (file)
@@ -39,8 +39,7 @@ struct request_sock_ops {
        void            (*send_reset)(struct sock *sk,
                                      struct sk_buff *skb);
        void            (*destructor)(struct request_sock *req);
-       void            (*syn_ack_timeout)(struct sock *sk,
-                                          struct request_sock *req);
+       void            (*syn_ack_timeout)(const struct request_sock *req);
 };
 
 int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
@@ -49,7 +48,11 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
  */
 struct request_sock {
        struct sock_common              __req_common;
+#define rsk_refcnt                     __req_common.skc_refcnt
+#define rsk_hash                       __req_common.skc_hash
+
        struct request_sock             *dl_next;
+       struct sock                     *rsk_listener;
        u16                             mss;
        u8                              num_retrans; /* number of retransmits */
        u8                              cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
@@ -58,32 +61,56 @@ struct request_sock {
        u32                             window_clamp; /* window clamp at creation time */
        u32                             rcv_wnd;          /* rcv_wnd offered first time */
        u32                             ts_recent;
-       unsigned long                   expires;
+       struct timer_list               rsk_timer;
        const struct request_sock_ops   *rsk_ops;
        struct sock                     *sk;
        u32                             secid;
        u32                             peer_secid;
 };
 
-static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
+static inline struct request_sock *
+reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
 {
        struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
 
-       if (req != NULL)
+       if (req) {
                req->rsk_ops = ops;
-
+               sock_hold(sk_listener);
+               req->rsk_listener = sk_listener;
+
+               /* Following is temporary. It is coupled with debugging
+                * helpers in reqsk_put() & reqsk_free()
+                */
+               atomic_set(&req->rsk_refcnt, 0);
+       }
        return req;
 }
 
-static inline void __reqsk_free(struct request_sock *req)
+static inline struct request_sock *inet_reqsk(struct sock *sk)
 {
-       kmem_cache_free(req->rsk_ops->slab, req);
+       return (struct request_sock *)sk;
+}
+
+static inline struct sock *req_to_sk(struct request_sock *req)
+{
+       return (struct sock *)req;
 }
 
 static inline void reqsk_free(struct request_sock *req)
 {
+       /* temporary debugging */
+       WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);
+
        req->rsk_ops->destructor(req);
-       __reqsk_free(req);
+       if (req->rsk_listener)
+               sock_put(req->rsk_listener);
+       kmem_cache_free(req->rsk_ops->slab, req);
+}
+
+static inline void reqsk_put(struct request_sock *req)
+{
+       if (atomic_dec_and_test(&req->rsk_refcnt))
+               reqsk_free(req);
 }
 
 extern int sysctl_max_syn_backlog;
@@ -93,12 +120,16 @@ extern int sysctl_max_syn_backlog;
  * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
  */
 struct listen_sock {
-       u8                      max_qlen_log;
+       int                     qlen_inc; /* protected by listener lock */
+       int                     young_inc;/* protected by listener lock */
+
+       /* following fields can be updated by timer */
+       atomic_t                qlen_dec; /* qlen = qlen_inc - qlen_dec */
+       atomic_t                young_dec;
+
+       u8                      max_qlen_log ____cacheline_aligned_in_smp;
        u8                      synflood_warned;
        /* 2 bytes hole, try to use */
-       int                     qlen;
-       int                     qlen_young;
-       int                     clock_hand;
        u32                     hash_rnd;
        u32                     nr_table_entries;
        struct request_sock     *syn_table[0];
@@ -142,18 +173,11 @@ struct fastopen_queue {
  * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
  * lock sock while browsing the listening hash (otherwise it's deadlock prone).
  *
- * This lock is acquired in read mode only from listening_get_next() seq_file
- * op and it's acquired in write mode _only_ from code that is actively
- * changing rskq_accept_head. All readers that are holding the master sock lock
- * don't need to grab this lock in read mode too as rskq_accept_head. writes
- * are always protected from the main sock lock.
  */
 struct request_sock_queue {
        struct request_sock     *rskq_accept_head;
        struct request_sock     *rskq_accept_tail;
-       rwlock_t                syn_wait_lock;
        u8                      rskq_defer_accept;
-       /* 3 bytes hole, try to pack */
        struct listen_sock      *listen_opt;
        struct fastopen_queue   *fastopenq; /* This is non-NULL iff TFO has been
                                             * enabled on this listener. Check
@@ -161,6 +185,9 @@ struct request_sock_queue {
                                             * to determine if TFO is enabled
                                             * right at this moment.
                                             */
+
+       /* temporary alignment, our goal is to get rid of this lock */
+       spinlock_t              syn_wait_lock ____cacheline_aligned_in_smp;
 };
 
 int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -186,12 +213,21 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue)
 }
 
 static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
-                                     struct request_sock *req,
-                                     struct request_sock **prev_req)
+                                     struct request_sock *req)
 {
-       write_lock(&queue->syn_wait_lock);
-       *prev_req = req->dl_next;
-       write_unlock(&queue->syn_wait_lock);
+       struct listen_sock *lopt = queue->listen_opt;
+       struct request_sock **prev;
+
+       spin_lock(&queue->syn_wait_lock);
+
+       prev = &lopt->syn_table[req->rsk_hash];
+       while (*prev != req)
+               prev = &(*prev)->dl_next;
+       *prev = req->dl_next;
+
+       spin_unlock(&queue->syn_wait_lock);
+       if (del_timer(&req->rsk_timer))
+               reqsk_put(req);
 }
 
 static inline void reqsk_queue_add(struct request_sock_queue *queue,
@@ -224,57 +260,53 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
        return req;
 }
 
-static inline int reqsk_queue_removed(struct request_sock_queue *queue,
-                                     struct request_sock *req)
+static inline void reqsk_queue_removed(struct request_sock_queue *queue,
+                                      const struct request_sock *req)
 {
        struct listen_sock *lopt = queue->listen_opt;
 
        if (req->num_timeout == 0)
-               --lopt->qlen_young;
-
-       return --lopt->qlen;
+               atomic_inc(&lopt->young_dec);
+       atomic_inc(&lopt->qlen_dec);
 }
 
-static inline int reqsk_queue_added(struct request_sock_queue *queue)
+static inline void reqsk_queue_added(struct request_sock_queue *queue)
 {
        struct listen_sock *lopt = queue->listen_opt;
-       const int prev_qlen = lopt->qlen;
 
-       lopt->qlen_young++;
-       lopt->qlen++;
-       return prev_qlen;
+       lopt->young_inc++;
+       lopt->qlen_inc++;
 }
 
-static inline int reqsk_queue_len(const struct request_sock_queue *queue)
+static inline int listen_sock_qlen(const struct listen_sock *lopt)
 {
-       return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+       return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
 }
 
-static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
+static inline int listen_sock_young(const struct listen_sock *lopt)
 {
-       return queue->listen_opt->qlen_young;
+       return lopt->young_inc - atomic_read(&lopt->young_dec);
 }
 
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
-       return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+       const struct listen_sock *lopt = queue->listen_opt;
+
+       return lopt ? listen_sock_qlen(lopt) : 0;
 }
 
-static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
-                                       u32 hash, struct request_sock *req,
-                                       unsigned long timeout)
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
-       struct listen_sock *lopt = queue->listen_opt;
-
-       req->expires = jiffies + timeout;
-       req->num_retrans = 0;
-       req->num_timeout = 0;
-       req->sk = NULL;
-       req->dl_next = lopt->syn_table[hash];
+       return listen_sock_young(queue->listen_opt);
+}
 
-       write_lock(&queue->syn_wait_lock);
-       lopt->syn_table[hash] = req;
-       write_unlock(&queue->syn_wait_lock);
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+{
+       return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
 }
 
+void reqsk_queue_hash_req(struct request_sock_queue *queue,
+                         u32 hash, struct request_sock *req,
+                         unsigned long timeout);
+
 #endif /* _REQUEST_SOCK_H */
index c605d305c577074d11bee6f19479dda8a4949ee3..6d778efcfdfd6c8a3973e03424625667ec350c3e 100644 (file)
@@ -213,7 +213,7 @@ struct tcf_proto_ops {
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        int                     (*init)(struct tcf_proto*);
-       void                    (*destroy)(struct tcf_proto*);
+       bool                    (*destroy)(struct tcf_proto*, bool);
 
        unsigned long           (*get)(struct tcf_proto*, u32 handle);
        int                     (*change)(struct net *net, struct sk_buff *,
@@ -399,7 +399,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops, u32 parentid);
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
-void tcf_destroy(struct tcf_proto *tp);
+bool tcf_destroy(struct tcf_proto *tp, bool force);
 void tcf_destroy_chain(struct tcf_proto __rcu **fl);
 
 /* Reset all TX qdiscs greater then index of a device.  */
index 856f01cb51dd296ed591719c4fc4504cb0bf12ef..c56a438c3a1eaf89d630edb17dd20802f0e01590 100644 (file)
@@ -166,6 +166,9 @@ void sctp_remaddr_proc_exit(struct net *net);
   */
 extern struct kmem_cache *sctp_chunk_cachep __read_mostly;
 extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
+extern long sysctl_sctp_mem[3];
+extern int sysctl_sctp_rmem[3];
+extern int sysctl_sctp_wmem[3];
 
 /*
  *  Section:  Macros, externs, and inlines
index ab186b1d31fffe7cc5b4888d85aca99c2d875da4..3a4898ec8c67c5242e9467a0d32f2e68f3a55302 100644 (file)
@@ -57,7 +57,6 @@
 #include <linux/page_counter.h>
 #include <linux/memcontrol.h>
 #include <linux/static_key.h>
-#include <linux/aio.h>
 #include <linux/sched.h>
 
 #include <linux/filter.h>
@@ -67,6 +66,7 @@
 #include <linux/atomic.h>
 #include <net/dst.h>
 #include <net/checksum.h>
+#include <net/tcp_states.h>
 #include <linux/net_tstamp.h>
 
 struct cgroup;
@@ -190,15 +190,15 @@ struct sock_common {
                struct hlist_nulls_node skc_portaddr_node;
        };
        struct proto            *skc_prot;
-#ifdef CONFIG_NET_NS
-       struct net              *skc_net;
-#endif
+       possible_net_t          skc_net;
 
 #if IS_ENABLED(CONFIG_IPV6)
        struct in6_addr         skc_v6_daddr;
        struct in6_addr         skc_v6_rcv_saddr;
 #endif
 
+       atomic64_t              skc_cookie;
+
        /*
         * fields between dontcopy_begin/dontcopy_end
         * are not copied in sock_copy()
@@ -329,6 +329,7 @@ struct sock {
 #define sk_net                 __sk_common.skc_net
 #define sk_v6_daddr            __sk_common.skc_v6_daddr
 #define sk_v6_rcv_saddr        __sk_common.skc_v6_rcv_saddr
+#define sk_cookie              __sk_common.skc_cookie
 
        socket_lock_t           sk_lock;
        struct sk_buff_head     sk_receive_queue;
@@ -403,8 +404,8 @@ struct sock {
        rwlock_t                sk_callback_lock;
        int                     sk_err,
                                sk_err_soft;
-       unsigned short          sk_ack_backlog;
-       unsigned short          sk_max_ack_backlog;
+       u32                     sk_ack_backlog;
+       u32                     sk_max_ack_backlog;
        __u32                   sk_priority;
 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
        __u32                   sk_cgrp_prioidx;
@@ -958,10 +959,9 @@ struct proto {
        int                     (*compat_ioctl)(struct sock *sk,
                                        unsigned int cmd, unsigned long arg);
 #endif
-       int                     (*sendmsg)(struct kiocb *iocb, struct sock *sk,
-                                          struct msghdr *msg, size_t len);
-       int                     (*recvmsg)(struct kiocb *iocb, struct sock *sk,
-                                          struct msghdr *msg,
+       int                     (*sendmsg)(struct sock *sk, struct msghdr *msg,
+                                          size_t len);
+       int                     (*recvmsg)(struct sock *sk, struct msghdr *msg,
                                           size_t len, int noblock, int flags,
                                           int *addr_len);
        int                     (*sendpage)(struct sock *sk, struct page *page,
@@ -1562,9 +1562,8 @@ int sock_no_listen(struct socket *, int);
 int sock_no_shutdown(struct socket *, int);
 int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
 int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
-int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
-int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
-                   int);
+int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
+int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
 int sock_no_mmap(struct file *file, struct socket *sock,
                 struct vm_area_struct *vma);
 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
@@ -1576,8 +1575,8 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
  */
 int sock_common_getsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, int __user *optlen);
-int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
-                              struct msghdr *msg, size_t size, int flags);
+int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                       int flags);
 int sock_common_setsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, unsigned int optlen);
 int compat_sock_common_getsockopt(struct socket *sock, int level,
@@ -1626,7 +1625,7 @@ static inline void sock_put(struct sock *sk)
                sk_free(sk);
 }
 /* Generic version of sock_put(), dealing with all sockets
- * (TCP_TIMEWAIT, ESTABLISHED...)
+ * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
  */
 void sock_gen_put(struct sock *sk);
 
@@ -1762,6 +1761,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
+bool sk_mc_loop(struct sock *sk);
+
 static inline bool sk_can_gso(const struct sock *sk)
 {
        return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
@@ -2078,6 +2079,29 @@ static inline int sock_intr_errno(long timeo)
        return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
 }
 
+struct sock_skb_cb {
+       u32 dropcount;
+};
+
+/* Store sock_skb_cb at the end of skb->cb[] so protocol families
+ * using skb->cb[] would keep using it directly and utilize its
+ * alignement guarantee.
+ */
+#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
+                           sizeof(struct sock_skb_cb)))
+
+#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
+                           SOCK_SKB_CB_OFFSET))
+
+#define sock_skb_cb_check_size(size) \
+       BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
+
+static inline void
+sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
+{
+       SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
+}
+
 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
                           struct sk_buff *skb);
 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2180,7 +2204,7 @@ static inline void sk_change_net(struct sock *sk, struct net *net)
 
        if (!net_eq(current_net, net)) {
                put_net(current_net);
-               sock_net_set(sk, hold_net(net));
+               sock_net_set(sk, net);
        }
 }
 
@@ -2196,6 +2220,14 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
        return NULL;
 }
 
+/* This helper checks if a socket is a full socket,
+ * ie _not_ a timewait or request socket.
+ */
+static inline bool sk_fullsock(const struct sock *sk)
+{
+       return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
+}
+
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
index cfcdac2e5d253ef431bc18709cc5e7edd0e47e6f..d2e69ee3019a06308dbf2951283d6db663d0ad78 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * include/net/switchdev.h - Switch device API
  * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include <linux/netdevice.h>
 #include <linux/notifier.h>
 
+struct fib_info;
+
+/**
+ * struct switchdev_ops - switchdev operations
+ *
+ * @swdev_parent_id_get: Called to get an ID of the switch chip this port
+ *   is part of.  If driver implements this, it indicates that it
+ *   represents a port of a switch chip.
+ *
+ * @swdev_port_stp_update: Called to notify switch device port of bridge
+ *   port STP state change.
+ *
+ * @swdev_fib_ipv4_add: Called to add/modify IPv4 route to switch device.
+ *
+ * @swdev_fib_ipv4_del: Called to delete IPv4 route from switch device.
+ */
+struct swdev_ops {
+       int     (*swdev_parent_id_get)(struct net_device *dev,
+                                      struct netdev_phys_item_id *psid);
+       int     (*swdev_port_stp_update)(struct net_device *dev, u8 state);
+       int     (*swdev_fib_ipv4_add)(struct net_device *dev, __be32 dst,
+                                     int dst_len, struct fib_info *fi,
+                                     u8 tos, u8 type, u32 nlflags,
+                                     u32 tb_id);
+       int     (*swdev_fib_ipv4_del)(struct net_device *dev, __be32 dst,
+                                     int dst_len, struct fib_info *fi,
+                                     u8 tos, u8 type, u32 tb_id);
+};
+
 enum netdev_switch_notifier_type {
        NETDEV_SWITCH_FDB_ADD = 1,
        NETDEV_SWITCH_FDB_DEL,
@@ -51,6 +81,12 @@ int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
                                               struct nlmsghdr *nlh, u16 flags);
 int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
                                               struct nlmsghdr *nlh, u16 flags);
+int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+                              u8 tos, u8 type, u32 nlflags, u32 tb_id);
+int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+                              u8 tos, u8 type, u32 tb_id);
+void netdev_switch_fib_ipv4_abort(struct fib_info *fi);
+
 #else
 
 static inline int netdev_switch_parent_id_get(struct net_device *dev,
@@ -109,6 +145,25 @@ static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *
        return 0;
 }
 
+static inline int netdev_switch_fib_ipv4_add(u32 dst, int dst_len,
+                                            struct fib_info *fi,
+                                            u8 tos, u8 type,
+                                            u32 nlflags, u32 tb_id)
+{
+       return 0;
+}
+
+static inline int netdev_switch_fib_ipv4_del(u32 dst, int dst_len,
+                                            struct fib_info *fi,
+                                            u8 tos, u8 type, u32 tb_id)
+{
+       return 0;
+}
+
+static inline void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+{
+}
+
 #endif
 
 #endif /* _LINUX_SWITCHDEV_H_ */
index 86a070ffc930285846d26db8e4730f5d362ca84e..a152e9858b2c819a8429cb0ebd5a2af11690e007 100644 (file)
 struct tcf_bpf {
        struct tcf_common       common;
        struct bpf_prog         *filter;
+       union {
+               u32             bpf_fd;
+               u16             bpf_num_ops;
+       };
        struct sock_filter      *bpf_ops;
-       u16                     bpf_num_ops;
+       const char              *bpf_name;
 };
 #define to_bpf(a) \
        container_of(a->priv, struct tcf_bpf, common)
index 8d6b983d509959dcc270b73605bbbcb5835d5885..963303fb96ae227263e648fb0c8dbafdc9cbc945 100644 (file)
@@ -65,7 +65,13 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCP_MIN_MSS            88U
 
 /* The least MTU to use for probing */
-#define TCP_BASE_MSS           512
+#define TCP_BASE_MSS           1024
+
+/* probing interval, default to 10 minutes as per RFC4821 */
+#define TCP_PROBE_INTERVAL     600
+
+/* Specify interval when tcp mtu probing will stop */
+#define TCP_PROBE_THRESHOLD    8
 
 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
 #define TCP_FASTRETRANS_THRESH 3
@@ -349,8 +355,7 @@ void tcp_v4_early_demux(struct sk_buff *skb);
 int tcp_v4_rcv(struct sk_buff *skb);
 
 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-               size_t size);
+int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
                 int flags);
 void tcp_release_cb(struct sock *sk);
@@ -401,8 +406,7 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
                                              struct sk_buff *skb,
                                              const struct tcphdr *th);
 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
-                          struct request_sock *req, struct request_sock **prev,
-                          bool fastopen);
+                          struct request_sock *req, bool fastopen);
 int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb);
 void tcp_enter_loss(struct sock *sk);
@@ -429,9 +433,9 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen);
 void tcp_set_keepalive(struct sock *sk, int val);
-void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
-int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-               size_t len, int nonblock, int flags, int *addr_len);
+void tcp_syn_ack_timeout(const struct request_sock *req);
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+               int flags, int *addr_len);
 void tcp_parse_options(const struct sk_buff *skb,
                       struct tcp_options_received *opt_rx,
                       int estab, struct tcp_fastopen_cookie *foc);
@@ -443,6 +447,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
+void tcp_req_err(struct sock *sk, u32 seq);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(struct sock *sk,
                                      struct request_sock *req,
@@ -524,8 +529,6 @@ int tcp_write_wakeup(struct sock *);
 void tcp_send_fin(struct sock *sk);
 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 int tcp_send_synack(struct sock *);
-bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
-                         const char *proto);
 void tcp_push_one(struct sock *, unsigned int mss_now);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
@@ -1132,31 +1135,6 @@ static inline int tcp_full_space(const struct sock *sk)
        return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static inline void tcp_openreq_init(struct request_sock *req,
-                                   struct tcp_options_received *rx_opt,
-                                   struct sk_buff *skb, struct sock *sk)
-{
-       struct inet_request_sock *ireq = inet_rsk(req);
-
-       req->rcv_wnd = 0;               /* So that tcp_send_synack() knows! */
-       req->cookie_ts = 0;
-       tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
-       tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
-       tcp_rsk(req)->snt_synack = tcp_time_stamp;
-       tcp_rsk(req)->last_oow_ack_time = 0;
-       req->mss = rx_opt->mss_clamp;
-       req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
-       ireq->tstamp_ok = rx_opt->tstamp_ok;
-       ireq->sack_ok = rx_opt->sack_ok;
-       ireq->snd_wscale = rx_opt->snd_wscale;
-       ireq->wscale_ok = rx_opt->wscale_ok;
-       ireq->acked = 0;
-       ireq->ecn_ok = 0;
-       ireq->ir_rmt_port = tcp_hdr(skb)->source;
-       ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
-       ireq->ir_mark = inet_request_mark(sk, skb);
-}
-
 extern void tcp_openreq_init_rwin(struct request_sock *req,
                                  struct sock *sk, struct dst_entry *dst);
 
@@ -1236,36 +1214,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
        return true;
 }
 
-/* Return true if we're currently rate-limiting out-of-window ACKs and
- * thus shouldn't send a dupack right now. We rate-limit dupacks in
- * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
- * attacks that send repeated SYNs or ACKs for the same connection. To
- * do this, we do not send a duplicate SYNACK or ACK if the remote
- * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
- */
-static inline bool tcp_oow_rate_limited(struct net *net,
-                                       const struct sk_buff *skb,
-                                       int mib_idx, u32 *last_oow_ack_time)
-{
-       /* Data packets without SYNs are not likely part of an ACK loop. */
-       if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
-           !tcp_hdr(skb)->syn)
-               goto not_rate_limited;
-
-       if (*last_oow_ack_time) {
-               s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
-               if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-                       NET_INC_STATS_BH(net, mib_idx);
-                       return true;    /* rate-limited: don't send yet! */
-               }
-       }
-
-       *last_oow_ack_time = tcp_time_stamp;
-
-not_rate_limited:
-       return false;   /* not rate-limited: go ahead, send dupack now! */
-}
+bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+                         int mib_idx, u32 *last_oow_ack_time);
 
 static inline void tcp_mib_init(struct net *net)
 {
@@ -1344,15 +1294,14 @@ struct tcp_md5sig_pool {
 };
 
 /* - functions */
-int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-                       const struct sock *sk, const struct request_sock *req,
-                       const struct sk_buff *skb);
+int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
+                       const struct sock *sk, const struct sk_buff *skb);
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
                   int family);
 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
-                                        struct sock *addr_sk);
+                                        const struct sock *addr_sk);
 
 #ifdef CONFIG_TCP_MD5SIG
 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
@@ -1663,28 +1612,26 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 struct tcp_sock_af_ops {
 #ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key   *(*md5_lookup) (struct sock *sk,
-                                               struct sock *addr_sk);
-       int                     (*calc_md5_hash) (char *location,
-                                                 struct tcp_md5sig_key *md5,
-                                                 const struct sock *sk,
-                                                 const struct request_sock *req,
-                                                 const struct sk_buff *skb);
-       int                     (*md5_parse) (struct sock *sk,
-                                             char __user *optval,
-                                             int optlen);
+                                               const struct sock *addr_sk);
+       int             (*calc_md5_hash)(char *location,
+                                        const struct tcp_md5sig_key *md5,
+                                        const struct sock *sk,
+                                        const struct sk_buff *skb);
+       int             (*md5_parse)(struct sock *sk,
+                                    char __user *optval,
+                                    int optlen);
 #endif
 };
 
 struct tcp_request_sock_ops {
        u16 mss_clamp;
 #ifdef CONFIG_TCP_MD5SIG
-       struct tcp_md5sig_key   *(*md5_lookup) (struct sock *sk,
-                                               struct request_sock *req);
-       int                     (*calc_md5_hash) (char *location,
-                                                 struct tcp_md5sig_key *md5,
-                                                 const struct sock *sk,
-                                                 const struct request_sock *req,
-                                                 const struct sk_buff *skb);
+       struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk,
+                                                const struct sock *addr_sk);
+       int             (*calc_md5_hash) (char *location,
+                                         const struct tcp_md5sig_key *md5,
+                                         const struct sock *sk,
+                                         const struct sk_buff *skb);
 #endif
        void (*init_req)(struct request_sock *req, struct sock *sk,
                         struct sk_buff *skb);
index b0b645988bd86687ac06b170f468e1239f4576f0..50e78a74d0dfa957394e68da9aa636dad4b08a05 100644 (file)
@@ -25,6 +25,7 @@ enum {
        TCP_LAST_ACK,
        TCP_LISTEN,
        TCP_CLOSING,    /* Now a valid state */
+       TCP_NEW_SYN_RECV,
 
        TCP_MAX_STATES  /* Leave at the end! */
 };
@@ -44,7 +45,8 @@ enum {
        TCPF_CLOSE_WAIT  = (1 << 8),
        TCPF_LAST_ACK    = (1 << 9),
        TCPF_LISTEN      = (1 << 10),
-       TCPF_CLOSING     = (1 << 11) 
+       TCPF_CLOSING     = (1 << 11),
+       TCPF_NEW_SYN_RECV = (1 << 12),
 };
 
 #endif /* _LINUX_TCP_STATES_H */
index 07f9b70962f64f0a7e80fe1ebbc81996be2d4567..6d4ed18e14278a6091b7e1c3fa40f1b2d4796d06 100644 (file)
@@ -194,6 +194,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                     int (*)(const struct sock *, const struct sock *),
                     unsigned int hash2_nulladdr);
 
+u32 udp_flow_hashrnd(void);
+
 static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
                                       int min, int max, bool use_eth)
 {
@@ -205,12 +207,19 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
        }
 
        hash = skb_get_hash(skb);
-       if (unlikely(!hash) && use_eth) {
-               /* Can't find a normal hash, caller has indicated an Ethernet
-                * packet so use that to compute a hash.
-                */
-               hash = jhash(skb->data, 2 * ETH_ALEN,
-                            (__force u32) skb->protocol);
+       if (unlikely(!hash)) {
+               if (use_eth) {
+                       /* Can't find a normal hash, caller has indicated an
+                        * Ethernet packet so use that to compute a hash.
+                        */
+                       hash = jhash(skb->data, 2 * ETH_ALEN,
+                                    (__force u32) skb->protocol);
+               } else {
+                       /* Can't derive any sort of hash for the packet, set
+                        * to some consistent random value.
+                        */
+                       hash = udp_flow_hashrnd();
+               }
        }
 
        /* Since this is being sent on the wire obfuscate hash a bit
@@ -229,8 +238,7 @@ int udp_get_port(struct sock *sk, unsigned short snum,
                 int (*saddr_cmp)(const struct sock *,
                                  const struct sock *));
 void udp_err(struct sk_buff *, u32);
-int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-               size_t len);
+int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
 int udp_push_pending_frames(struct sock *sk);
 void udp_flush_pending_frames(struct sock *sk);
 void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
index c73e7abbbaa50e917e65a1117e881bd78665b12a..756e4636bad8a3a8395013db990cf774aa49aa19 100644 (file)
@@ -131,7 +131,7 @@ struct vxlan_sock {
 #define VXLAN_F_GBP                    0x800
 #define VXLAN_F_REMCSUM_NOPARTIAL      0x1000
 
-/* Flags that are used in the receive patch. These flags must match in
+/* Flags that are used in the receive path. These flags must match in
  * order for a socket to be shareable
  */
 #define VXLAN_F_RCV_FLAGS              (VXLAN_F_GBP |                  \
index dc4865e90fe489c4859659b3a937e7c1f8a7a4bb..461f8353949370f7342f83d23513d864ac0adc6f 100644 (file)
@@ -126,9 +126,7 @@ struct xfrm_state_walk {
 
 /* Full description of state of transformer. */
 struct xfrm_state {
-#ifdef CONFIG_NET_NS
-       struct net              *xs_net;
-#endif
+       possible_net_t          xs_net;
        union {
                struct hlist_node       gclist;
                struct hlist_node       bydst;
@@ -522,9 +520,7 @@ struct xfrm_policy_queue {
 };
 
 struct xfrm_policy {
-#ifdef CONFIG_NET_NS
-       struct net              *xp_net;
-#endif
+       possible_net_t          xp_net;
        struct hlist_node       bydst;
        struct hlist_node       byidx;
 
@@ -1029,7 +1025,7 @@ xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
        case AF_INET:
                return addr->a4 == 0;
        case AF_INET6:
-               return ipv6_addr_any((struct in6_addr *)&addr->a6);
+               return ipv6_addr_any(&addr->in6);
        }
        return 0;
 }
@@ -1242,8 +1238,8 @@ void xfrm_flowi_addr_get(const struct flowi *fl,
                memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
                break;
        case AF_INET6:
-               *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
-               *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
+               saddr->in6 = fl->u.ip6.saddr;
+               daddr->in6 = fl->u.ip6.daddr;
                break;
        }
 }
index f2902ef7ab75cdf2f81880b95db6fea4486bd461..4dce116bfd80c81ab09aec171817e082723ad74e 100644 (file)
@@ -47,7 +47,8 @@ struct rxrpc_header {
 #define RXRPC_PACKET_TYPE_CHALLENGE    6       /* connection security challenge (SRVR->CLNT) */
 #define RXRPC_PACKET_TYPE_RESPONSE     7       /* connection security response (CLNT->SRVR) */
 #define RXRPC_PACKET_TYPE_DEBUG                8       /* debug info request */
-#define RXRPC_N_PACKET_TYPES           9       /* number of packet types (incl type 0) */
+#define RXRPC_PACKET_TYPE_VERSION      13      /* version string request */
+#define RXRPC_N_PACKET_TYPES           14      /* number of packet types (incl type 0) */
 
        uint8_t         flags;          /* packet flags */
 #define RXRPC_CLIENT_INITIATED 0x01            /* signifies a packet generated by a client */
index 45da7ec7d2742235e05b2ea53ab19d80af64794f..23df3e7f8e7d2eee7b196bff4c825d38ab284043 100644 (file)
@@ -118,8 +118,12 @@ enum bpf_map_type {
 enum bpf_prog_type {
        BPF_PROG_TYPE_UNSPEC,
        BPF_PROG_TYPE_SOCKET_FILTER,
+       BPF_PROG_TYPE_SCHED_CLS,
+       BPF_PROG_TYPE_SCHED_ACT,
 };
 
+#define BPF_PSEUDO_MAP_FD      1
+
 /* flags for BPF_MAP_UPDATE_ELEM command */
 #define BPF_ANY                0 /* create new element or update existing */
 #define BPF_NOEXIST    1 /* create new element if it didn't exist */
@@ -162,7 +166,61 @@ enum bpf_func_id {
        BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */
        BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */
        BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
+       BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */
+       BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */
+
+       /**
+        * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
+        * @skb: pointer to skb
+        * @offset: offset within packet from skb->data
+        * @from: pointer where to copy bytes from
+        * @len: number of bytes to store into packet
+        * @flags: bit 0 - if true, recompute skb->csum
+        *         other bits - reserved
+        * Return: 0 on success
+        */
+       BPF_FUNC_skb_store_bytes,
+
+       /**
+        * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum
+        * @skb: pointer to skb
+        * @offset: offset within packet where IP checksum is located
+        * @from: old value of header field
+        * @to: new value of header field
+        * @flags: bits 0-3 - size of header field
+        *         other bits - reserved
+        * Return: 0 on success
+        */
+       BPF_FUNC_l3_csum_replace,
+
+       /**
+        * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum
+        * @skb: pointer to skb
+        * @offset: offset within packet where TCP/UDP checksum is located
+        * @from: old value of header field
+        * @to: new value of header field
+        * @flags: bits 0-3 - size of header field
+        *         bit 4 - is pseudo header
+        *         other bits - reserved
+        * Return: 0 on success
+        */
+       BPF_FUNC_l4_csum_replace,
        __BPF_FUNC_MAX_ID,
 };
 
+/* user accessible mirror of in-kernel sk_buff.
+ * new fields can only be added to the end of this structure
+ */
+struct __sk_buff {
+       __u32 len;
+       __u32 pkt_type;
+       __u32 mark;
+       __u32 queue_mapping;
+       __u32 protocol;
+       __u32 vlan_present;
+       __u32 vlan_tci;
+       __u32 vlan_proto;
+       __u32 priority;
+};
+
 #endif /* _UAPI__LINUX_BPF_H__ */
index 78ec76fd89a6ce4fe70161576ec0c01e5d6156d3..8735f1080385d045fff82660fd53c3a79dd98fc8 100644 (file)
@@ -57,6 +57,7 @@ enum {
        CAN_RAW_LOOPBACK,       /* local loopback (default:on)       */
        CAN_RAW_RECV_OWN_MSGS,  /* receive my own msgs (default:off) */
        CAN_RAW_FD_FRAMES,      /* allow CAN FD frames (default:off) */
+       CAN_RAW_JOIN_FILTERS,   /* all filters must match to trigger */
 };
 
 #endif /* !_UAPI_CAN_RAW_H */
index e711f20dc522ee22c728a338a203b6ce4c27d22a..6497d7933d5be0c97ffa2cf629895ca41230483c 100644 (file)
@@ -78,6 +78,70 @@ struct ieee_maxrate {
        __u64   tc_maxrate[IEEE_8021QAZ_MAX_TCS];
 };
 
+enum dcbnl_cndd_states {
+       DCB_CNDD_RESET = 0,
+       DCB_CNDD_EDGE,
+       DCB_CNDD_INTERIOR,
+       DCB_CNDD_INTERIOR_READY,
+};
+
+/* This structure contains the IEEE 802.1Qau QCN managed object.
+ *
+ *@rpg_enable: enable QCN RP
+ *@rppp_max_rps: maximum number of RPs allowed for this CNPV on this port
+ *@rpg_time_reset: time between rate increases if no CNMs received.
+ *                given in u-seconds
+ *@rpg_byte_reset: transmitted data between rate increases if no CNMs received.
+ *                given in Bytes
+ *@rpg_threshold: The number of times rpByteStage or rpTimeStage can count
+ *                before RP rate control state machine advances states
+ *@rpg_max_rate: the maximum rate, in Mbits per second,
+ *              at which an RP can transmit
+ *@rpg_ai_rate: The rate, in Mbits per second,
+ *             used to increase rpTargetRate in the RPR_ACTIVE_INCREASE
+ *@rpg_hai_rate: The rate, in Mbits per second,
+ *              used to increase rpTargetRate in the RPR_HYPER_INCREASE state
+ *@rpg_gd: Upon CNM receive, flow rate is limited to (Fb/Gd)*CurrentRate.
+ *        rpgGd is given as log2(Gd), where Gd may only be powers of 2
+ *@rpg_min_dec_fac: The minimum factor by which the current transmit rate
+ *                 can be changed by reception of a CNM.
+ *                 value is given as percentage (1-100)
+ *@rpg_min_rate: The minimum value, in bits per second, for rate to limit
+ *@cndd_state_machine: The state of the congestion notification domain
+ *                    defense state machine, as defined by IEEE 802.1Qau
+ *                    section 32.1.1. In the interior ready state,
+ *                    the QCN capable hardware may add CN-TAG TLV to the
+ *                    outgoing traffic, to specifically identify outgoing
+ *                    flows.
+ */
+
+struct ieee_qcn {
+       __u8 rpg_enable[IEEE_8021QAZ_MAX_TCS];
+       __u32 rppp_max_rps[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_time_reset[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_byte_reset[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_threshold[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_max_rate[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_ai_rate[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_hai_rate[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_gd[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_min_dec_fac[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_min_rate[IEEE_8021QAZ_MAX_TCS];
+       __u32 cndd_state_machine[IEEE_8021QAZ_MAX_TCS];
+};
+
+/* This structure contains the IEEE 802.1Qau QCN statistics.
+ *
+ *@rppp_rp_centiseconds: the number of RP-centiseconds accumulated
+ *                      by RPs at this priority level on this Port
+ *@rppp_created_rps: number of active RPs(flows) that react to CNMs
+ */
+
+struct ieee_qcn_stats {
+       __u64 rppp_rp_centiseconds[IEEE_8021QAZ_MAX_TCS];
+       __u32 rppp_created_rps[IEEE_8021QAZ_MAX_TCS];
+};
+
 /* This structure contains the IEEE 802.1Qaz PFC managed object
  *
  * @pfc_cap: Indicates the number of traffic classes on the local device
@@ -334,6 +398,8 @@ enum ieee_attrs {
        DCB_ATTR_IEEE_PEER_PFC,
        DCB_ATTR_IEEE_PEER_APP,
        DCB_ATTR_IEEE_MAXRATE,
+       DCB_ATTR_IEEE_QCN,
+       DCB_ATTR_IEEE_QCN_STATS,
        __DCB_ATTR_IEEE_MAX
 };
 #define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
index 47785d5ecf17d465add70aae88c536bb4ca92ece..34c7936ca114e726190fde39f80f481186cf3174 100644 (file)
@@ -77,7 +77,8 @@ struct sock_fprog {   /* Required for SO_ATTACH_FILTER. */
 #define SKF_AD_VLAN_TAG_PRESENT 48
 #define SKF_AD_PAY_OFFSET      52
 #define SKF_AD_RANDOM  56
-#define SKF_AD_MAX     60
+#define SKF_AD_VLAN_TPID       60
+#define SKF_AD_MAX     64
 #define SKF_NET_OFF   (-0x100000)
 #define SKF_LL_OFF    (-0x200000)
 
index dea10a87dfd10d961f9f63a527c729db69f613f7..4318ab1635cedfc65a14f969fdb0077e4b984f74 100644 (file)
@@ -50,6 +50,8 @@ enum {
 #define IFA_F_PERMANENT                0x80
 #define IFA_F_MANAGETEMPADDR   0x100
 #define IFA_F_NOPREFIXROUTE    0x200
+#define IFA_F_MCAUTOJOIN       0x400
+#define IFA_F_STABLE_PRIVACY   0x800
 
 struct ifa_cacheinfo {
        __u32   ifa_prefered;
index dfd0bb22e554e7d7ac2cb45e2c4021b901346f38..7ffb18df01caebb7cbe1c06db756a7a9e284a541 100644 (file)
@@ -147,6 +147,7 @@ enum {
        IFLA_CARRIER_CHANGES,
        IFLA_PHYS_SWITCH_ID,
        IFLA_LINK_NETNSID,
+       IFLA_PHYS_PORT_NAME,
        __IFLA_MAX
 };
 
@@ -215,6 +216,7 @@ enum {
 enum in6_addr_gen_mode {
        IN6_ADDR_GEN_MODE_EUI64,
        IN6_ADDR_GEN_MODE_NONE,
+       IN6_ADDR_GEN_MODE_STABLE_PRIVACY,
 };
 
 /* Bridge section */
@@ -224,6 +226,9 @@ enum {
        IFLA_BR_FORWARD_DELAY,
        IFLA_BR_HELLO_TIME,
        IFLA_BR_MAX_AGE,
+       IFLA_BR_AGEING_TIME,
+       IFLA_BR_STP_STATE,
+       IFLA_BR_PRIORITY,
        __IFLA_BR_MAX,
 };
 
@@ -247,6 +252,7 @@ enum {
        IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
        IFLA_BRPORT_PROXYARP,   /* proxy ARP */
        IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */
+       IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
index da2d668b8cf1703d167bcea99013385639c14967..053bd102fbe00a0affd7227359e25c7246de9d7e 100644 (file)
@@ -99,6 +99,7 @@ struct tpacket_auxdata {
 #define TP_STATUS_VLAN_VALID           (1 << 4) /* auxdata has valid tp_vlan_tci */
 #define TP_STATUS_BLK_TMO              (1 << 5)
 #define TP_STATUS_VLAN_TPID_VALID      (1 << 6) /* auxdata has valid tp_vlan_tpid */
+#define TP_STATUS_CSUM_VALID           (1 << 7)
 
 /* Tx ring - header status */
 #define TP_STATUS_AVAILABLE          0
index b0a81307985282005ade90ee6da96e3048c405d3..2f62ab2d7bf99bd75b16eef6db74ce8aa233e8ff 100644 (file)
@@ -973,7 +973,8 @@ struct input_keymap_entry {
  */
 #define MT_TOOL_FINGER         0
 #define MT_TOOL_PEN            1
-#define MT_TOOL_MAX            1
+#define MT_TOOL_PALM           2
+#define MT_TOOL_MAX            2
 
 /*
  * Values describing the status of a force-feedback effect
index cabe95d5b4613c78af31f16be630ef03146a7d0b..3199243f20282050fa992de45f9f627a5a72199e 100644 (file)
@@ -358,6 +358,8 @@ enum {
 
        IPVS_SVC_ATTR_PE_NAME,          /* name of ct retriever */
 
+       IPVS_SVC_ATTR_STATS64,          /* nested attribute for service stats */
+
        __IPVS_SVC_ATTR_MAX,
 };
 
@@ -387,6 +389,8 @@ enum {
 
        IPVS_DEST_ATTR_ADDR_FAMILY,     /* Address family of address */
 
+       IPVS_DEST_ATTR_STATS64,         /* nested attribute for dest stats */
+
        __IPVS_DEST_ATTR_MAX,
 };
 
@@ -410,7 +414,8 @@ enum {
 /*
  * Attributes used to describe service or destination entry statistics
  *
- * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS
+ * Used inside nested attributes IPVS_SVC_ATTR_STATS, IPVS_DEST_ATTR_STATS,
+ * IPVS_SVC_ATTR_STATS64 and IPVS_DEST_ATTR_STATS64.
  */
 enum {
        IPVS_STATS_ATTR_UNSPEC = 0,
index 437a6a4b125a1383be23027c7130966d7c862bd5..5efa54ae567ca933a15dc1210b5af2cb569951c6 100644 (file)
@@ -170,6 +170,7 @@ enum {
        DEVCONF_ACCEPT_RA_FROM_LOCAL,
        DEVCONF_USE_OPTIMISTIC,
        DEVCONF_ACCEPT_RA_MTU,
+       DEVCONF_STABLE_SECRET,
        DEVCONF_MAX
 };
 
index 3873a35509aad201f4d5ddb31346077a89641b78..2e35c61bbdd192eb08c8dbe787f179cd62b9aaad 100644 (file)
@@ -126,6 +126,7 @@ enum {
        NDTPA_PROXY_QLEN,               /* u32 */
        NDTPA_LOCKTIME,                 /* u64, msecs */
        NDTPA_QUEUE_LENBYTES,           /* u32 */
+       NDTPA_MCAST_REPROBES,           /* u32 */
        __NDTPA_MAX
 };
 #define NDTPA_MAX (__NDTPA_MAX - 1)
index 832bc46db78bc3498e8ab18b6e7a50ba12c86acd..b9783931503b1f704a2422d85922301929dc2675 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _LINUX_NF_TABLES_H
 #define _LINUX_NF_TABLES_H
 
+#define NFT_TABLE_MAXNAMELEN   32
 #define NFT_CHAIN_MAXNAMELEN   32
 #define NFT_USERDATA_MAXLEN    256
 
index 68b294e839447ab5a4bfeeaf0b2bebf6fe2ba427..241220c43e861233789420a4012a90f0e2308d28 100644 (file)
  *
  */
 
+/*
+ * This header file defines the userspace API to the wireless stack. Please
+ * be careful not to break things - i.e. don't move anything around or so
+ * unless you can demonstrate that it breaks neither API nor ABI.
+ *
+ * Additions to the API should be accompanied by actual implementations in
+ * an upstream driver, so that example implementations exist in case there
+ * are ever concerns about the precise semantics of the API or changes are
+ * needed, and to ensure that code for dead (no longer implemented) API
+ * can actually be identified and removed.
+ * Nonetheless, semantics should also be documented carefully in this file.
+ */
+
 #include <linux/types.h>
 
 #define NL80211_GENL_NAME "nl80211"
@@ -1684,6 +1697,10 @@ enum nl80211_commands {
  *     If set during scheduled scan start then the new scan req will be
  *     owned by the netlink socket that created it and the scheduled scan will
  *     be stopped when the socket is closed.
+ *     If set during configuration of regulatory indoor operation then the
+ *     regulatory indoor configuration would be owned by the netlink socket
+ *     that configured the indoor setting, and the indoor operation would be
+ *     cleared when the socket is closed.
  *
  * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
  *     the TDLS link initiator.
@@ -1737,8 +1754,12 @@ enum nl80211_commands {
  *     should be contained in the result as the sum of the respective counters
  *     over all channels.
  *
- * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before a scheduled scan (or a
- *     WoWLAN net-detect scan) is started, u32 in seconds.
+ * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before the first cycle of a
+ *     scheduled scan (or a WoWLAN net-detect scan) is started, u32
+ *     in seconds.
+
+ * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device
+ *      is operating in an indoor environment.
  *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2107,6 +2128,8 @@ enum nl80211_attrs {
 
        NL80211_ATTR_SCHED_SCAN_DELAY,
 
+       NL80211_ATTR_REG_INDOOR,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -3092,7 +3115,8 @@ enum nl80211_mesh_power_mode {
  *
  * @NL80211_MESHCONF_PLINK_TIMEOUT: If no tx activity is seen from a STA we've
  *     established peering with for longer than this time (in seconds), then
- *     remove it from the STA's list of peers.  Default is 30 minutes.
+ *     remove it from the STA's list of peers. You may set this to 0 to disable
+ *     the removal of the STA. Default is 30 minutes.
  *
  * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
  */
@@ -3694,6 +3718,8 @@ struct nl80211_pattern_support {
  * @NL80211_WOWLAN_TRIG_ANY: wake up on any activity, do not really put
  *     the chip into a special state -- works best with chips that have
  *     support for low-power operation already (flag)
+ *     Note that this mode is incompatible with all of the others, if
+ *     any others are even supported by the device.
  * @NL80211_WOWLAN_TRIG_DISCONNECT: wake up on disconnect, the way disconnect
  *     is detected is implementation-specific (flag)
  * @NL80211_WOWLAN_TRIG_MAGIC_PKT: wake up on magic packet (6x 0xff, followed
@@ -4327,11 +4353,13 @@ enum nl80211_feature_flags {
 
 /**
  * enum nl80211_ext_feature_index - bit index of extended features.
+ * @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates.
  *
  * @NUM_NL80211_EXT_FEATURES: number of extended features.
  * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
  */
 enum nl80211_ext_feature_index {
+       NL80211_EXT_FEATURE_VHT_IBSS,
 
        /* add new features before the definition below */
        NUM_NL80211_EXT_FEATURES,
index 25731dfb3fcc2f331cbba3c8f415c75614c1f46d..bf08e76bf50525912360edbfe09b1e220f683e94 100644 (file)
@@ -397,6 +397,8 @@ enum {
        TCA_BPF_CLASSID,
        TCA_BPF_OPS_LEN,
        TCA_BPF_OPS,
+       TCA_BPF_FD,
+       TCA_BPF_NAME,
        __TCA_BPF_MAX,
 };
 
index 5cc5d66bf519f65cb4b29e041c3b06fdbe01c889..bea910f924dde6b125568e83e737801e721778c0 100644 (file)
@@ -303,6 +303,9 @@ enum rtattr_type_t {
        RTA_TABLE,
        RTA_MARK,
        RTA_MFC_STATS,
+       RTA_VIA,
+       RTA_NEWDST,
+       RTA_PREF,
        __RTA_MAX
 };
 
@@ -332,6 +335,7 @@ struct rtnexthop {
 #define RTNH_F_DEAD            1       /* Nexthop is dead (used by multipath)  */
 #define RTNH_F_PERVASIVE       2       /* Do recursive gateway lookup  */
 #define RTNH_F_ONLINK          4       /* Gateway is forced on link    */
+#define RTNH_F_EXTERNAL                8       /* Route installed externally   */
 
 /* Macros to handle hexthops */
 
@@ -344,6 +348,12 @@ struct rtnexthop {
 #define RTNH_SPACE(len)        RTNH_ALIGN(RTNH_LENGTH(len))
 #define RTNH_DATA(rtnh)   ((struct rtattr*)(((char*)(rtnh)) + RTNH_LENGTH(0)))
 
+/* RTA_VIA */
+struct rtvia {
+       __kernel_sa_family_t    rtvia_family;
+       __u8                    rtvia_addr[0];
+};
+
 /* RTM_CACHEINFO */
 
 struct rta_cacheinfo {
@@ -623,6 +633,8 @@ enum rtnetlink_groups {
 #define RTNLGRP_IPV6_NETCONF   RTNLGRP_IPV6_NETCONF
        RTNLGRP_MDB,
 #define RTNLGRP_MDB            RTNLGRP_MDB
+       RTNLGRP_MPLS_ROUTE,
+#define RTNLGRP_MPLS_ROUTE     RTNLGRP_MPLS_ROUTE
        __RTNLGRP_MAX
 };
 #define RTNLGRP_MAX    (__RTNLGRP_MAX - 1)
index 5288bd77e63bbfd734e8454aee633a3340ca636f..07f17cc70bb3ee2f8ca7667221679b67f09c3cc6 100644 (file)
@@ -24,6 +24,8 @@ enum {
        TCA_ACT_BPF_PARMS,
        TCA_ACT_BPF_OPS_LEN,
        TCA_ACT_BPF_OPS,
+       TCA_ACT_BPF_FD,
+       TCA_ACT_BPF_NAME,
        __TCA_ACT_BPF_MAX,
 };
 #define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
index 8d723824ad6934825d34077431fc43d11ae4a718..d4c8f142ba633d8e96d5a227ce1d91dc79c70235 100644 (file)
@@ -83,11 +83,20 @@ enum {
        TIPC_NLA_BEARER_NAME,           /* string */
        TIPC_NLA_BEARER_PROP,           /* nest */
        TIPC_NLA_BEARER_DOMAIN,         /* u32 */
+       TIPC_NLA_BEARER_UDP_OPTS,       /* nest */
 
        __TIPC_NLA_BEARER_MAX,
        TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1
 };
 
+enum {
+       TIPC_NLA_UDP_UNSPEC,
+       TIPC_NLA_UDP_LOCAL,             /* sockaddr_storage */
+       TIPC_NLA_UDP_REMOTE,            /* sockaddr_storage */
+
+       __TIPC_NLA_UDP_MAX,
+       TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1
+};
 /* Socket info */
 enum {
        TIPC_NLA_SOCK_UNSPEC,
index 02d5125a5ee8b285032ef3b066d2c48efca7c95a..2cd9e608d0d17d88985c3e32774e567da3236bb5 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _LINUX_XFRM_H
 #define _LINUX_XFRM_H
 
+#include <linux/in6.h>
 #include <linux/types.h>
 
 /* All of the structures in this file may not change size as they are
@@ -13,6 +14,7 @@
 typedef union {
        __be32          a4;
        __be32          a6[4];
+       struct in6_addr in6;
 } xfrm_address_t;
 
 /* Ident of a specific xfrm_state. It is used on input to lookup
index e6c10d1a405821d55b7595a2cd5e3404766d3d42..74963d192c5d96ce3674f0adbf554c95af4867ec 100644 (file)
@@ -213,7 +213,7 @@ static int acct_on(struct filename *pathname)
                return -EACCES;
        }
 
-       if (!file->f_op->write) {
+       if (!(file->f_mode & FMODE_CAN_WRITE)) {
                kfree(acct);
                filp_close(file, NULL);
                return -EIO;
index a5ae60f0b0a2d33a48cf7d5b2585e5d048f931f6..e6983be12bd373dfc68eab347f9c6e764fbb6ce3 100644 (file)
@@ -1,5 +1,2 @@
 obj-y := core.o
 obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o hashtab.o arraymap.o helpers.o
-ifdef CONFIG_TEST_BPF
-obj-$(CONFIG_BPF_SYSCALL) += test_stub.o
-endif
index 9eb4d8a7cd870b513d9d19e2707bc2950e91f744..8a6616583f38adce1e90ecc19b3bce92e01d2f83 100644 (file)
@@ -134,7 +134,7 @@ static void array_map_free(struct bpf_map *map)
        kvfree(array);
 }
 
-static struct bpf_map_ops array_ops = {
+static const struct bpf_map_ops array_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
@@ -143,14 +143,14 @@ static struct bpf_map_ops array_ops = {
        .map_delete_elem = array_map_delete_elem,
 };
 
-static struct bpf_map_type_list tl = {
+static struct bpf_map_type_list array_type __read_mostly = {
        .ops = &array_ops,
        .type = BPF_MAP_TYPE_ARRAY,
 };
 
 static int __init register_array_map(void)
 {
-       bpf_register_map_type(&tl);
+       bpf_register_map_type(&array_type);
        return 0;
 }
 late_initcall(register_array_map);
index a64e7a207d2b5cd123b65f7143c6d659c0aed726..4139a0f8b558e4e7db4f79adbd353c97c7fda553 100644 (file)
@@ -656,6 +656,14 @@ void bpf_prog_free(struct bpf_prog *fp)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_free);
 
+/* Weak definitions of helper functions in case we don't have bpf syscall. */
+const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
+const struct bpf_func_proto bpf_map_update_elem_proto __weak;
+const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
+
+const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
+const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
+
 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
  */
index b3ba43674310145585d8a1441030a50ebac48252..83c209d9b17ae6b23eb16d0fd0d94b873897ffba 100644 (file)
@@ -345,7 +345,7 @@ static void htab_map_free(struct bpf_map *map)
        kfree(htab);
 }
 
-static struct bpf_map_ops htab_ops = {
+static const struct bpf_map_ops htab_ops = {
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
@@ -354,14 +354,14 @@ static struct bpf_map_ops htab_ops = {
        .map_delete_elem = htab_map_delete_elem,
 };
 
-static struct bpf_map_type_list tl = {
+static struct bpf_map_type_list htab_type __read_mostly = {
        .ops = &htab_ops,
        .type = BPF_MAP_TYPE_HASH,
 };
 
 static int __init register_htab_map(void)
 {
-       bpf_register_map_type(&tl);
+       bpf_register_map_type(&htab_type);
        return 0;
 }
 late_initcall(register_htab_map);
index 9e3414d854593f922fe6cc0021c07727c5b3daa0..bd7f5988ed9cca0f4973388f68741444b0cbfcb8 100644 (file)
@@ -11,6 +11,8 @@
  */
 #include <linux/bpf.h>
 #include <linux/rcupdate.h>
+#include <linux/random.h>
+#include <linux/smp.h>
 
 /* If kernel subsystem is allowing eBPF programs to call this function,
  * inside its own verifier_ops->get_func_proto() callback it should return
@@ -41,7 +43,7 @@ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
        return (unsigned long) value;
 }
 
-struct bpf_func_proto bpf_map_lookup_elem_proto = {
+const struct bpf_func_proto bpf_map_lookup_elem_proto = {
        .func = bpf_map_lookup_elem,
        .gpl_only = false,
        .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
@@ -60,7 +62,7 @@ static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
        return map->ops->map_update_elem(map, key, value, r4);
 }
 
-struct bpf_func_proto bpf_map_update_elem_proto = {
+const struct bpf_func_proto bpf_map_update_elem_proto = {
        .func = bpf_map_update_elem,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
@@ -80,10 +82,32 @@ static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
        return map->ops->map_delete_elem(map, key);
 }
 
-struct bpf_func_proto bpf_map_delete_elem_proto = {
+const struct bpf_func_proto bpf_map_delete_elem_proto = {
        .func = bpf_map_delete_elem,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_MAP_KEY,
 };
+
+static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+       return prandom_u32();
+}
+
+const struct bpf_func_proto bpf_get_prandom_u32_proto = {
+       .func           = bpf_get_prandom_u32,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+};
+
+static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+       return raw_smp_processor_id();
+}
+
+const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
+       .func           = bpf_get_smp_processor_id,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+};
index 536edc2be3072e91ab132555fc4f9bc3ce656604..ea75c654af1b0011ecf3efdd481822ce9f073a63 100644 (file)
@@ -354,10 +354,11 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
        list_for_each_entry(tl, &bpf_prog_types, list_node) {
                if (tl->type == type) {
                        prog->aux->ops = tl->ops;
-                       prog->aux->prog_type = type;
+                       prog->type = type;
                        return 0;
                }
        }
+
        return -EINVAL;
 }
 
@@ -418,6 +419,7 @@ void bpf_prog_put(struct bpf_prog *prog)
                bpf_prog_free(prog);
        }
 }
+EXPORT_SYMBOL_GPL(bpf_prog_put);
 
 static int bpf_prog_release(struct inode *inode, struct file *filp)
 {
@@ -465,6 +467,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
        fdput(f);
        return prog;
 }
+EXPORT_SYMBOL_GPL(bpf_prog_get);
 
 /* last field in 'union bpf_attr' used by this command */
 #define        BPF_PROG_LOAD_LAST_FIELD log_buf
@@ -508,7 +511,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        prog->jited = false;
 
        atomic_set(&prog->aux->refcnt, 1);
-       prog->aux->is_gpl_compatible = is_gpl;
+       prog->gpl_compatible = is_gpl;
 
        /* find program type: socket_filter vs tracing_filter */
        err = find_prog_type(type, prog);
@@ -516,8 +519,7 @@ static int bpf_prog_load(union bpf_attr *attr)
                goto free_prog;
 
        /* run eBPF verifier */
-       err = bpf_check(prog, attr);
-
+       err = bpf_check(&prog, attr);
        if (err < 0)
                goto free_used_maps;
 
@@ -528,7 +530,6 @@ static int bpf_prog_load(union bpf_attr *attr)
        bpf_prog_select_runtime(prog);
 
        err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
-
        if (err < 0)
                /* failed to allocate fd */
                goto free_used_maps;
diff --git a/kernel/bpf/test_stub.c b/kernel/bpf/test_stub.c
deleted file mode 100644 (file)
index 0ceae1e..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/bpf.h>
-
-/* test stubs for BPF_MAP_TYPE_UNSPEC and for BPF_PROG_TYPE_UNSPEC
- * to be used by user space verifier testsuite
- */
-struct bpf_context {
-       u64 arg1;
-       u64 arg2;
-};
-
-static const struct bpf_func_proto *test_func_proto(enum bpf_func_id func_id)
-{
-       switch (func_id) {
-       case BPF_FUNC_map_lookup_elem:
-               return &bpf_map_lookup_elem_proto;
-       case BPF_FUNC_map_update_elem:
-               return &bpf_map_update_elem_proto;
-       case BPF_FUNC_map_delete_elem:
-               return &bpf_map_delete_elem_proto;
-       default:
-               return NULL;
-       }
-}
-
-static const struct bpf_context_access {
-       int size;
-       enum bpf_access_type type;
-} test_ctx_access[] = {
-       [offsetof(struct bpf_context, arg1)] = {
-               FIELD_SIZEOF(struct bpf_context, arg1),
-               BPF_READ
-       },
-       [offsetof(struct bpf_context, arg2)] = {
-               FIELD_SIZEOF(struct bpf_context, arg2),
-               BPF_READ
-       },
-};
-
-static bool test_is_valid_access(int off, int size, enum bpf_access_type type)
-{
-       const struct bpf_context_access *access;
-
-       if (off < 0 || off >= ARRAY_SIZE(test_ctx_access))
-               return false;
-
-       access = &test_ctx_access[off];
-       if (access->size == size && (access->type & type))
-               return true;
-
-       return false;
-}
-
-static struct bpf_verifier_ops test_ops = {
-       .get_func_proto = test_func_proto,
-       .is_valid_access = test_is_valid_access,
-};
-
-static struct bpf_prog_type_list tl_prog = {
-       .ops = &test_ops,
-       .type = BPF_PROG_TYPE_UNSPEC,
-};
-
-static int __init register_test_ops(void)
-{
-       bpf_register_prog_type(&tl_prog);
-       return 0;
-}
-late_initcall(register_test_ops);
index a28e09c7825d76d5ab10530a88fa183994f48d1e..630a7bac1e513e330785283e38aa1dfbe9eb5c50 100644 (file)
@@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
        enum bpf_reg_type expected_type;
        int err = 0;
 
-       if (arg_type == ARG_ANYTHING)
+       if (arg_type == ARG_DONTCARE)
                return 0;
 
        if (reg->type == NOT_INIT) {
@@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
                return -EACCES;
        }
 
+       if (arg_type == ARG_ANYTHING)
+               return 0;
+
        if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
            arg_type == ARG_PTR_TO_MAP_VALUE) {
                expected_type = PTR_TO_STACK;
@@ -770,6 +773,8 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
                expected_type = CONST_IMM;
        } else if (arg_type == ARG_CONST_MAP_PTR) {
                expected_type = CONST_PTR_TO_MAP;
+       } else if (arg_type == ARG_PTR_TO_CTX) {
+               expected_type = PTR_TO_CTX;
        } else {
                verbose("unsupported arg_type %d\n", arg_type);
                return -EFAULT;
@@ -852,7 +857,7 @@ static int check_call(struct verifier_env *env, int func_id)
        }
 
        /* eBPF programs must be GPL compatible to use GPL-ed functions */
-       if (!env->prog->aux->is_gpl_compatible && fn->gpl_only) {
+       if (!env->prog->gpl_compatible && fn->gpl_only) {
                verbose("cannot call GPL only function from proprietary program\n");
                return -EINVAL;
        }
@@ -1172,6 +1177,18 @@ static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
        return 0;
 }
 
+static bool may_access_skb(enum bpf_prog_type type)
+{
+       switch (type) {
+       case BPF_PROG_TYPE_SOCKET_FILTER:
+       case BPF_PROG_TYPE_SCHED_CLS:
+       case BPF_PROG_TYPE_SCHED_ACT:
+               return true;
+       default:
+               return false;
+       }
+}
+
 /* verify safety of LD_ABS|LD_IND instructions:
  * - they can only appear in the programs where ctx == skb
  * - since they are wrappers of function calls, they scratch R1-R5 registers,
@@ -1194,8 +1211,8 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
        struct reg_state *reg;
        int i, err;
 
-       if (env->prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) {
-               verbose("BPF_LD_ABS|IND instructions are only allowed in socket filters\n");
+       if (!may_access_skb(env->prog->type)) {
+               verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
                return -EINVAL;
        }
 
@@ -1606,11 +1623,10 @@ static int do_check(struct verifier_env *env)
                                return err;
 
                } else if (class == BPF_LDX) {
-                       if (BPF_MODE(insn->code) != BPF_MEM ||
-                           insn->imm != 0) {
-                               verbose("BPF_LDX uses reserved fields\n");
-                               return -EINVAL;
-                       }
+                       enum bpf_reg_type src_reg_type;
+
+                       /* check for reserved fields is already done */
+
                        /* check src operand */
                        err = check_reg_arg(regs, insn->src_reg, SRC_OP);
                        if (err)
@@ -1629,6 +1645,29 @@ static int do_check(struct verifier_env *env)
                        if (err)
                                return err;
 
+                       src_reg_type = regs[insn->src_reg].type;
+
+                       if (insn->imm == 0 && BPF_SIZE(insn->code) == BPF_W) {
+                               /* saw a valid insn
+                                * dst_reg = *(u32 *)(src_reg + off)
+                                * use reserved 'imm' field to mark this insn
+                                */
+                               insn->imm = src_reg_type;
+
+                       } else if (src_reg_type != insn->imm &&
+                                  (src_reg_type == PTR_TO_CTX ||
+                                   insn->imm == PTR_TO_CTX)) {
+                               /* ABuser program is trying to use the same insn
+                                * dst_reg = *(u32*) (src_reg + off)
+                                * with different pointer types:
+                                * src_reg == ctx in one branch and
+                                * src_reg == stack|map in some other branch.
+                                * Reject it.
+                                */
+                               verbose("same insn cannot be used with different pointers\n");
+                               return -EINVAL;
+                       }
+
                } else if (class == BPF_STX) {
                        if (BPF_MODE(insn->code) == BPF_XADD) {
                                err = check_xadd(env, insn);
@@ -1776,6 +1815,13 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
        int i, j;
 
        for (i = 0; i < insn_cnt; i++, insn++) {
+               if (BPF_CLASS(insn->code) == BPF_LDX &&
+                   (BPF_MODE(insn->code) != BPF_MEM ||
+                    insn->imm != 0)) {
+                       verbose("BPF_LDX uses reserved fields\n");
+                       return -EINVAL;
+               }
+
                if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
                        struct bpf_map *map;
                        struct fd f;
@@ -1867,6 +1913,92 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env)
                        insn->src_reg = 0;
 }
 
+static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
+{
+       struct bpf_insn *insn = prog->insnsi;
+       int insn_cnt = prog->len;
+       int i;
+
+       for (i = 0; i < insn_cnt; i++, insn++) {
+               if (BPF_CLASS(insn->code) != BPF_JMP ||
+                   BPF_OP(insn->code) == BPF_CALL ||
+                   BPF_OP(insn->code) == BPF_EXIT)
+                       continue;
+
+               /* adjust offset of jmps if necessary */
+               if (i < pos && i + insn->off + 1 > pos)
+                       insn->off += delta;
+               else if (i > pos && i + insn->off + 1 < pos)
+                       insn->off -= delta;
+       }
+}
+
+/* convert load instructions that access fields of 'struct __sk_buff'
+ * into sequence of instructions that access fields of 'struct sk_buff'
+ */
+static int convert_ctx_accesses(struct verifier_env *env)
+{
+       struct bpf_insn *insn = env->prog->insnsi;
+       int insn_cnt = env->prog->len;
+       struct bpf_insn insn_buf[16];
+       struct bpf_prog *new_prog;
+       u32 cnt;
+       int i;
+
+       if (!env->prog->aux->ops->convert_ctx_access)
+               return 0;
+
+       for (i = 0; i < insn_cnt; i++, insn++) {
+               if (insn->code != (BPF_LDX | BPF_MEM | BPF_W))
+                       continue;
+
+               if (insn->imm != PTR_TO_CTX) {
+                       /* clear internal mark */
+                       insn->imm = 0;
+                       continue;
+               }
+
+               cnt = env->prog->aux->ops->
+                       convert_ctx_access(insn->dst_reg, insn->src_reg,
+                                          insn->off, insn_buf);
+               if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+                       verbose("bpf verifier is misconfigured\n");
+                       return -EINVAL;
+               }
+
+               if (cnt == 1) {
+                       memcpy(insn, insn_buf, sizeof(*insn));
+                       continue;
+               }
+
+               /* several new insns need to be inserted. Make room for them */
+               insn_cnt += cnt - 1;
+               new_prog = bpf_prog_realloc(env->prog,
+                                           bpf_prog_size(insn_cnt),
+                                           GFP_USER);
+               if (!new_prog)
+                       return -ENOMEM;
+
+               new_prog->len = insn_cnt;
+
+               memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1,
+                       sizeof(*insn) * (insn_cnt - i - cnt));
+
+               /* copy substitute insns in place of load instruction */
+               memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);
+
+               /* adjust branches in the whole program */
+               adjust_branches(new_prog, i, cnt - 1);
+
+               /* keep walking new program and skip insns we just inserted */
+               env->prog = new_prog;
+               insn = new_prog->insnsi + i + cnt - 1;
+               i += cnt - 1;
+       }
+
+       return 0;
+}
+
 static void free_states(struct verifier_env *env)
 {
        struct verifier_state_list *sl, *sln;
@@ -1889,13 +2021,13 @@ static void free_states(struct verifier_env *env)
        kfree(env->explored_states);
 }
 
-int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
+int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 {
        char __user *log_ubuf = NULL;
        struct verifier_env *env;
        int ret = -EINVAL;
 
-       if (prog->len <= 0 || prog->len > BPF_MAXINSNS)
+       if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
                return -E2BIG;
 
        /* 'struct verifier_env' can be global, but since it's not small,
@@ -1905,7 +2037,7 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
        if (!env)
                return -ENOMEM;
 
-       env->prog = prog;
+       env->prog = *prog;
 
        /* grab the mutex to protect few globals used by verifier */
        mutex_lock(&bpf_verifier_lock);
@@ -1937,7 +2069,7 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
        if (ret < 0)
                goto skip_full_check;
 
-       env->explored_states = kcalloc(prog->len,
+       env->explored_states = kcalloc(env->prog->len,
                                       sizeof(struct verifier_state_list *),
                                       GFP_USER);
        ret = -ENOMEM;
@@ -1954,6 +2086,10 @@ skip_full_check:
        while (pop_stack(env, NULL) >= 0);
        free_states(env);
 
+       if (ret == 0)
+               /* program is valid, convert *(u32*)(ctx + off) accesses */
+               ret = convert_ctx_accesses(env);
+
        if (log_level && log_len >= log_size - 1) {
                BUG_ON(log_len >= log_size);
                /* verifier log exceeded user supplied buffer */
@@ -1969,18 +2105,18 @@ skip_full_check:
 
        if (ret == 0 && env->used_map_cnt) {
                /* if program passed verifier, update used_maps in bpf_prog_info */
-               prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
-                                                    sizeof(env->used_maps[0]),
-                                                    GFP_KERNEL);
+               env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
+                                                         sizeof(env->used_maps[0]),
+                                                         GFP_KERNEL);
 
-               if (!prog->aux->used_maps) {
+               if (!env->prog->aux->used_maps) {
                        ret = -ENOMEM;
                        goto free_log_buf;
                }
 
-               memcpy(prog->aux->used_maps, env->used_maps,
+               memcpy(env->prog->aux->used_maps, env->used_maps,
                       sizeof(env->used_maps[0]) * env->used_map_cnt);
-               prog->aux->used_map_cnt = env->used_map_cnt;
+               env->prog->aux->used_map_cnt = env->used_map_cnt;
 
                /* program is valid. Convert pseudo bpf_ld_imm64 into generic
                 * bpf_ld_imm64 instructions
@@ -1992,11 +2128,12 @@ free_log_buf:
        if (log_level)
                vfree(log_buf);
 free_env:
-       if (!prog->aux->used_maps)
+       if (!env->prog->aux->used_maps)
                /* if we didn't copy map pointers into bpf_prog_info, release
                 * them now. Otherwise free_bpf_prog_info() will release them.
                 */
                release_maps(env);
+       *prog = env->prog;
        kfree(env);
        mutex_unlock(&bpf_verifier_lock);
        return ret;
index bb0635bd74f26a2ecb9f651de9e0c4113e4f2476..879edfc5ee52d2985d4fb925ec820ba6b113d6d0 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/security.h>
 #include <linux/bootmem.h>
 #include <linux/memblock.h>
-#include <linux/aio.h>
 #include <linux/syscalls.h>
 #include <linux/kexec.h>
 #include <linux/kdb.h>
@@ -46,6 +45,7 @@
 #include <linux/irq_work.h>
 #include <linux/utsname.h>
 #include <linux/ctype.h>
+#include <linux/uio.h>
 
 #include <asm/uaccess.h>
 
@@ -521,7 +521,7 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
        int i;
        int level = default_message_loglevel;
        int facility = 1;       /* LOG_USER */
-       size_t len = iocb->ki_nbytes;
+       size_t len = iov_iter_count(from);
        ssize_t ret = len;
 
        if (len > LOG_LINE_MAX)
index ce410bb9f2e103e0fcfda7d7b844948a0a28fbce..4012336de30f6fe88688bc0366bba216af72ea9d 100644 (file)
@@ -19,6 +19,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/aio.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
index 9d96e283520cc7f3ec27714dfa4abfcb3800e319..75232ad0a5e7ead00e5d8396ed34763d84a0685c 100644 (file)
@@ -317,6 +317,32 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 }
 EXPORT_SYMBOL(iov_iter_fault_in_readable);
 
+/*
+ * Fault in one or more iovecs of the given iov_iter, to a maximum length of
+ * bytes.  For each iovec, fault in each page that constitutes the iovec.
+ *
+ * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
+ * because it is an invalid address).
+ */
+int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
+{
+       size_t skip = i->iov_offset;
+       const struct iovec *iov;
+       int err;
+       struct iovec v;
+
+       if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
+               iterate_iovec(i, bytes, v, iov, skip, ({
+                       err = fault_in_multipages_readable(v.iov_base,
+                                       v.iov_len);
+                       if (unlikely(err))
+                       return err;
+               0;}))
+       }
+       return 0;
+}
+EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
+
 void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
@@ -766,3 +792,60 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
                                   flags);
 }
 EXPORT_SYMBOL(dup_iter);
+
+int import_iovec(int type, const struct iovec __user * uvector,
+                unsigned nr_segs, unsigned fast_segs,
+                struct iovec **iov, struct iov_iter *i)
+{
+       ssize_t n;
+       struct iovec *p;
+       n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
+                                 *iov, &p);
+       if (n < 0) {
+               if (p != *iov)
+                       kfree(p);
+               *iov = NULL;
+               return n;
+       }
+       iov_iter_init(i, type, p, nr_segs, n);
+       *iov = p == *iov ? NULL : p;
+       return 0;
+}
+EXPORT_SYMBOL(import_iovec);
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
+                unsigned nr_segs, unsigned fast_segs,
+                struct iovec **iov, struct iov_iter *i)
+{
+       ssize_t n;
+       struct iovec *p;
+       n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
+                                 *iov, &p);
+       if (n < 0) {
+               if (p != *iov)
+                       kfree(p);
+               *iov = NULL;
+               return n;
+       }
+       iov_iter_init(i, type, p, nr_segs, n);
+       *iov = p == *iov ? NULL : p;
+       return 0;
+}
+#endif
+
+int import_single_range(int rw, void __user *buf, size_t len,
+                struct iovec *iov, struct iov_iter *i)
+{
+       if (len > MAX_RW_COUNT)
+               len = MAX_RW_COUNT;
+       if (unlikely(!access_ok(!rw, buf, len)))
+               return -EFAULT;
+
+       iov->iov_base = buf;
+       iov->iov_len = len;
+       iov_iter_init(i, rw, iov, 1, len);
+       return 0;
+}
index e97dbd51e7569f6a7ba273227752f2cdbcaebb49..03d7fcb420b5d60c564ad10935011ed8a6556b69 100644 (file)
--- a/lib/lcm.c
+++ b/lib/lcm.c
@@ -12,3 +12,14 @@ unsigned long lcm(unsigned long a, unsigned long b)
                return 0;
 }
 EXPORT_SYMBOL_GPL(lcm);
+
+unsigned long lcm_not_zero(unsigned long a, unsigned long b)
+{
+       unsigned long l = lcm(a, b);
+
+       if (l)
+               return l;
+
+       return (b ? : a);
+}
+EXPORT_SYMBOL_GPL(lcm_not_zero);
index b5344ef4c6846c4f9256c1d0d418f774284c8fcc..4898442b837fbd715f8d5079a1060e43c4b7a367 100644 (file)
@@ -1,13 +1,13 @@
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  *
- * Based on the following paper:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
  * Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <linux/err.h>
 
 #define HASH_DEFAULT_SIZE      64UL
-#define HASH_MIN_SIZE          4UL
+#define HASH_MIN_SIZE          4U
 #define BUCKET_LOCKS_PER_CPU   128UL
 
-/* Base bits plus 1 bit for nulls marker */
-#define HASH_RESERVED_SPACE    (RHT_BASE_BITS + 1)
-
-enum {
-       RHT_LOCK_NORMAL,
-       RHT_LOCK_NESTED,
-};
-
-/* The bucket lock is selected based on the hash and protects mutations
- * on a group of hash buckets.
- *
- * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
- * a single lock always covers both buckets which may both contains
- * entries which link to the same bucket of the old table during resizing.
- * This allows to simplify the locking as locking the bucket in both
- * tables during resize always guarantee protection.
- *
- * IMPORTANT: When holding the bucket lock of both the old and new table
- * during expansions and shrinking, the old bucket lock must always be
- * acquired first.
- */
-static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
-{
-       return &tbl->locks[hash & tbl->locks_mask];
-}
-
-static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
-{
-       return (void *) he - ht->p.head_offset;
-}
-
-static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
-{
-       return hash & (tbl->size - 1);
-}
-
-static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
-{
-       u32 hash;
-
-       if (unlikely(!ht->p.key_len))
-               hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
-       else
-               hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
-                                   ht->p.hash_rnd);
-
-       return hash >> HASH_RESERVED_SPACE;
-}
-
-static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
-{
-       return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
-}
-
-static u32 head_hashfn(const struct rhashtable *ht,
+static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
 {
-       return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
+       return rht_head_hashfn(ht, tbl, he, ht->p);
 }
 
 #ifdef CONFIG_PROVE_LOCKING
-static void debug_dump_buckets(const struct rhashtable *ht,
-                              const struct bucket_table *tbl)
-{
-       struct rhash_head *he;
-       unsigned int i, hash;
-
-       for (i = 0; i < tbl->size; i++) {
-               pr_warn(" [Bucket %d] ", i);
-               rht_for_each_rcu(he, tbl, i) {
-                       hash = head_hashfn(ht, tbl, he);
-                       pr_cont("[hash = %#x, lock = %p] ",
-                               hash, bucket_lock(tbl, hash));
-               }
-               pr_cont("\n");
-       }
-
-}
-
-static void debug_dump_table(struct rhashtable *ht,
-                            const struct bucket_table *tbl,
-                            unsigned int hash)
-{
-       struct bucket_table *old_tbl, *future_tbl;
-
-       pr_emerg("BUG: lock for hash %#x in table %p not held\n",
-                hash, tbl);
-
-       rcu_read_lock();
-       future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-       old_tbl = rht_dereference_rcu(ht->tbl, ht);
-       if (future_tbl != old_tbl) {
-               pr_warn("Future table %p (size: %zd)\n",
-                       future_tbl, future_tbl->size);
-               debug_dump_buckets(ht, future_tbl);
-       }
-
-       pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
-       debug_dump_buckets(ht, old_tbl);
-
-       rcu_read_unlock();
-}
-
 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
-#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)                              \
-       do {                                                            \
-               if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \
-                       debug_dump_table(HT, TBL, HASH);                \
-                       BUG();                                          \
-               }                                                       \
-       } while (0)
 
 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
 {
@@ -151,30 +48,18 @@ EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
 
 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
 {
-       spinlock_t *lock = bucket_lock(tbl, hash);
+       spinlock_t *lock = rht_bucket_lock(tbl, hash);
 
        return (debug_locks) ? lockdep_is_held(lock) : 1;
 }
 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #else
 #define ASSERT_RHT_MUTEX(HT)
-#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
 #endif
 
 
-static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
-{
-       struct rhash_head __rcu **pprev;
-
-       for (pprev = &tbl->buckets[n];
-            !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
-            pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
-               ;
-
-       return pprev;
-}
-
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
+static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
+                             gfp_t gfp)
 {
        unsigned int i, size;
 #if defined(CONFIG_PROVE_LOCKING)
@@ -191,12 +76,13 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
 
        if (sizeof(spinlock_t) != 0) {
 #ifdef CONFIG_NUMA
-               if (size * sizeof(spinlock_t) > PAGE_SIZE)
+               if (size * sizeof(spinlock_t) > PAGE_SIZE &&
+                   gfp == GFP_KERNEL)
                        tbl->locks = vmalloc(size * sizeof(spinlock_t));
                else
 #endif
                tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-                                          GFP_KERNEL);
+                                          gfp);
                if (!tbl->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
@@ -215,153 +101,181 @@ static void bucket_table_free(const struct bucket_table *tbl)
        kvfree(tbl);
 }
 
+static void bucket_table_free_rcu(struct rcu_head *head)
+{
+       bucket_table_free(container_of(head, struct bucket_table, rcu));
+}
+
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
-                                              size_t nbuckets)
+                                              size_t nbuckets,
+                                              gfp_t gfp)
 {
        struct bucket_table *tbl = NULL;
        size_t size;
        int i;
 
        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-       if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
-               tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
-       if (tbl == NULL)
+       if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
+           gfp != GFP_KERNEL)
+               tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
+       if (tbl == NULL && gfp == GFP_KERNEL)
                tbl = vzalloc(size);
        if (tbl == NULL)
                return NULL;
 
        tbl->size = nbuckets;
 
-       if (alloc_bucket_locks(ht, tbl) < 0) {
+       if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
                bucket_table_free(tbl);
                return NULL;
        }
 
+       INIT_LIST_HEAD(&tbl->walkers);
+
+       get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+
        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
 
        return tbl;
 }
 
-/**
- * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
- * @ht:                hash table
- * @new_size:  new table size
- */
-static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
+                                                 struct bucket_table *tbl)
 {
-       /* Expand table when exceeding 75% load */
-       return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
-              (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
-}
+       struct bucket_table *new_tbl;
 
-/**
- * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
- * @ht:                hash table
- * @new_size:  new table size
- */
-static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
-{
-       /* Shrink table beneath 30% load */
-       return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
-              (atomic_read(&ht->shift) > ht->p.min_shift);
-}
+       do {
+               new_tbl = tbl;
+               tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+       } while (tbl);
 
-static void lock_buckets(struct bucket_table *new_tbl,
-                        struct bucket_table *old_tbl, unsigned int hash)
-       __acquires(old_bucket_lock)
-{
-       spin_lock_bh(bucket_lock(old_tbl, hash));
-       if (new_tbl != old_tbl)
-               spin_lock_bh_nested(bucket_lock(new_tbl, hash),
-                                   RHT_LOCK_NESTED);
+       return new_tbl;
 }
 
-static void unlock_buckets(struct bucket_table *new_tbl,
-                          struct bucket_table *old_tbl, unsigned int hash)
-       __releases(old_bucket_lock)
+static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 {
-       if (new_tbl != old_tbl)
-               spin_unlock_bh(bucket_lock(new_tbl, hash));
-       spin_unlock_bh(bucket_lock(old_tbl, hash));
+       struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+       struct bucket_table *new_tbl = rhashtable_last_table(ht,
+               rht_dereference_rcu(old_tbl->future_tbl, ht));
+       struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
+       int err = -ENOENT;
+       struct rhash_head *head, *next, *entry;
+       spinlock_t *new_bucket_lock;
+       unsigned int new_hash;
+
+       rht_for_each(entry, old_tbl, old_hash) {
+               err = 0;
+               next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
+
+               if (rht_is_a_nulls(next))
+                       break;
+
+               pprev = &entry->next;
+       }
+
+       if (err)
+               goto out;
+
+       new_hash = head_hashfn(ht, new_tbl, entry);
+
+       new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);
+
+       spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
+       head = rht_dereference_bucket(new_tbl->buckets[new_hash],
+                                     new_tbl, new_hash);
+
+       if (rht_is_a_nulls(head))
+               INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
+       else
+               RCU_INIT_POINTER(entry->next, head);
+
+       rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
+       spin_unlock(new_bucket_lock);
+
+       rcu_assign_pointer(*pprev, next);
+
+out:
+       return err;
 }
 
-/**
- * Unlink entries on bucket which hash to different bucket.
- *
- * Returns true if no more work needs to be performed on the bucket.
- */
-static bool hashtable_chain_unzip(struct rhashtable *ht,
-                                 const struct bucket_table *new_tbl,
-                                 struct bucket_table *old_tbl,
-                                 size_t old_hash)
+static void rhashtable_rehash_chain(struct rhashtable *ht,
+                                   unsigned int old_hash)
 {
-       struct rhash_head *he, *p, *next;
-       unsigned int new_hash, new_hash2;
-
-       ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);
+       struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+       spinlock_t *old_bucket_lock;
 
-       /* Old bucket empty, no work needed. */
-       p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
-                                  old_hash);
-       if (rht_is_a_nulls(p))
-               return false;
+       old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
 
-       new_hash = head_hashfn(ht, new_tbl, p);
-       ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
+       spin_lock_bh(old_bucket_lock);
+       while (!rhashtable_rehash_one(ht, old_hash))
+               ;
+       old_tbl->rehash++;
+       spin_unlock_bh(old_bucket_lock);
+}
 
-       /* Advance the old bucket pointer one or more times until it
-        * reaches a node that doesn't hash to the same bucket as the
-        * previous node p. Call the previous node p;
-        */
-       rht_for_each_continue(he, p->next, old_tbl, old_hash) {
-               new_hash2 = head_hashfn(ht, new_tbl, he);
-               ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);
+static int rhashtable_rehash_attach(struct rhashtable *ht,
+                                   struct bucket_table *old_tbl,
+                                   struct bucket_table *new_tbl)
+{
+       /* Protect future_tbl using the first bucket lock. */
+       spin_lock_bh(old_tbl->locks);
 
-               if (new_hash != new_hash2)
-                       break;
-               p = he;
+       /* Did somebody beat us to it? */
+       if (rcu_access_pointer(old_tbl->future_tbl)) {
+               spin_unlock_bh(old_tbl->locks);
+               return -EEXIST;
        }
-       rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
 
-       /* Find the subsequent node which does hash to the same
-        * bucket as node P, or NULL if no such node exists.
+       /* Make insertions go into the new, empty table right away. Deletions
+        * and lookups will be attempted in both tables until we synchronize.
         */
-       INIT_RHT_NULLS_HEAD(next, ht, old_hash);
-       if (!rht_is_a_nulls(he)) {
-               rht_for_each_continue(he, he->next, old_tbl, old_hash) {
-                       if (head_hashfn(ht, new_tbl, he) == new_hash) {
-                               next = he;
-                               break;
-                       }
-               }
-       }
+       rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
 
-       /* Set p's next pointer to that subsequent node pointer,
-        * bypassing the nodes which do not hash to p's bucket
-        */
-       rcu_assign_pointer(p->next, next);
+       /* Ensure the new table is visible to readers. */
+       smp_wmb();
 
-       p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
-                                  old_hash);
+       spin_unlock_bh(old_tbl->locks);
 
-       return !rht_is_a_nulls(p);
+       return 0;
 }
 
-static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
-                           unsigned int new_hash, struct rhash_head *entry)
+static int rhashtable_rehash_table(struct rhashtable *ht)
 {
-       ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
+       struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+       struct bucket_table *new_tbl;
+       struct rhashtable_walker *walker;
+       unsigned int old_hash;
+
+       new_tbl = rht_dereference(old_tbl->future_tbl, ht);
+       if (!new_tbl)
+               return 0;
+
+       for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
+               rhashtable_rehash_chain(ht, old_hash);
+
+       /* Publish the new table pointer. */
+       rcu_assign_pointer(ht->tbl, new_tbl);
+
+       spin_lock(&ht->lock);
+       list_for_each_entry(walker, &old_tbl->walkers, list)
+               walker->tbl = NULL;
+       spin_unlock(&ht->lock);
 
-       rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
+       /* Wait for readers. All new readers will see the new
+        * table, and thus no references to the old table will
+        * remain.
+        */
+       call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
+
+       return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
 }
 
 /**
  * rhashtable_expand - Expand hash table while allowing concurrent lookups
  * @ht:                the hash table to expand
  *
- * A secondary bucket array is allocated and the hash entries are migrated
- * while keeping them on both lists until the end of the RCU grace period.
+ * A secondary bucket array is allocated and the hash entries are migrated.
  *
  * This function may only be called in a context where it is safe to call
  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
@@ -372,89 +286,32 @@ static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
  * It is valid to have concurrent insertions and deletions protected by per
  * bucket locks or concurrent RCU protected lookups and traversals.
  */
-int rhashtable_expand(struct rhashtable *ht)
+static int rhashtable_expand(struct rhashtable *ht)
 {
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
-       struct rhash_head *he;
-       unsigned int new_hash, old_hash;
-       bool complete = false;
+       int err;
 
        ASSERT_RHT_MUTEX(ht);
 
-       new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
+       old_tbl = rhashtable_last_table(ht, old_tbl);
+
+       new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;
 
-       atomic_inc(&ht->shift);
-
-       /* Make insertions go into the new, empty table right away. Deletions
-        * and lookups will be attempted in both tables until we synchronize.
-        * The synchronize_rcu() guarantees for the new table to be picked up
-        * so no new additions go into the old table while we relink.
-        */
-       rcu_assign_pointer(ht->future_tbl, new_tbl);
-       synchronize_rcu();
-
-       /* For each new bucket, search the corresponding old bucket for the
-        * first entry that hashes to the new bucket, and link the end of
-        * newly formed bucket chain (containing entries added to future
-        * table) to that entry. Since all the entries which will end up in
-        * the new bucket appear in the same old bucket, this constructs an
-        * entirely valid new hash table, but with multiple buckets
-        * "zipped" together into a single imprecise chain.
-        */
-       for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
-               old_hash = rht_bucket_index(old_tbl, new_hash);
-               lock_buckets(new_tbl, old_tbl, new_hash);
-               rht_for_each(he, old_tbl, old_hash) {
-                       if (head_hashfn(ht, new_tbl, he) == new_hash) {
-                               link_old_to_new(ht, new_tbl, new_hash, he);
-                               break;
-                       }
-               }
-               unlock_buckets(new_tbl, old_tbl, new_hash);
-               cond_resched();
-       }
-
-       /* Unzip interleaved hash chains */
-       while (!complete && !ht->being_destroyed) {
-               /* Wait for readers. All new readers will see the new
-                * table, and thus no references to the old table will
-                * remain.
-                */
-               synchronize_rcu();
-
-               /* For each bucket in the old table (each of which
-                * contains items from multiple buckets of the new
-                * table): ...
-                */
-               complete = true;
-               for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
-                       lock_buckets(new_tbl, old_tbl, old_hash);
-
-                       if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
-                                                 old_hash))
-                               complete = false;
-
-                       unlock_buckets(new_tbl, old_tbl, old_hash);
-                       cond_resched();
-               }
-       }
+       err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
+       if (err)
+               bucket_table_free(new_tbl);
 
-       rcu_assign_pointer(ht->tbl, new_tbl);
-       synchronize_rcu();
-
-       bucket_table_free(old_tbl);
-       return 0;
+       return err;
 }
-EXPORT_SYMBOL_GPL(rhashtable_expand);
 
 /**
  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
  * @ht:                the hash table to shrink
  *
- * This function may only be called in a context where it is safe to call
- * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
+ * This function shrinks the hash table to fit, i.e., the smallest
+ * size would not cause it to expand right away automatically.
  *
  * The caller must ensure that no concurrent resizing occurs by holding
  * ht->mutex.
@@ -465,395 +322,146 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
  * It is valid to have concurrent insertions and deletions protected by per
  * bucket locks or concurrent RCU protected lookups and traversals.
  */
-int rhashtable_shrink(struct rhashtable *ht)
+static int rhashtable_shrink(struct rhashtable *ht)
 {
-       struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
-       unsigned int new_hash;
+       struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+       unsigned int size;
+       int err;
 
        ASSERT_RHT_MUTEX(ht);
 
-       new_tbl = bucket_table_alloc(ht, tbl->size / 2);
-       if (new_tbl == NULL)
-               return -ENOMEM;
-
-       rcu_assign_pointer(ht->future_tbl, new_tbl);
-       synchronize_rcu();
-
-       /* Link the first entry in the old bucket to the end of the
-        * bucket in the new table. As entries are concurrently being
-        * added to the new table, lock down the new bucket. As we
-        * always divide the size in half when shrinking, each bucket
-        * in the new table maps to exactly two buckets in the old
-        * table.
-        */
-       for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
-               lock_buckets(new_tbl, tbl, new_hash);
-
-               rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
-                                  tbl->buckets[new_hash]);
-               ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
-               rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
-                                  tbl->buckets[new_hash + new_tbl->size]);
+       size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+       if (size < ht->p.min_size)
+               size = ht->p.min_size;
 
-               unlock_buckets(new_tbl, tbl, new_hash);
-               cond_resched();
-       }
+       if (old_tbl->size <= size)
+               return 0;
 
-       /* Publish the new, valid hash table */
-       rcu_assign_pointer(ht->tbl, new_tbl);
-       atomic_dec(&ht->shift);
+       if (rht_dereference(old_tbl->future_tbl, ht))
+               return -EEXIST;
 
-       /* Wait for readers. No new readers will have references to the
-        * old hash table.
-        */
-       synchronize_rcu();
+       new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
+       if (new_tbl == NULL)
+               return -ENOMEM;
 
-       bucket_table_free(tbl);
+       err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
+       if (err)
+               bucket_table_free(new_tbl);
 
-       return 0;
+       return err;
 }
-EXPORT_SYMBOL_GPL(rhashtable_shrink);
 
 static void rht_deferred_worker(struct work_struct *work)
 {
        struct rhashtable *ht;
        struct bucket_table *tbl;
-       struct rhashtable_walker *walker;
+       int err = 0;
 
        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);
-       if (ht->being_destroyed)
-               goto unlock;
 
        tbl = rht_dereference(ht->tbl, ht);
+       tbl = rhashtable_last_table(ht, tbl);
 
-       list_for_each_entry(walker, &ht->walkers, list)
-               walker->resize = true;
-
-       if (rht_grow_above_75(ht, tbl->size))
+       if (rht_grow_above_75(ht, tbl))
                rhashtable_expand(ht);
-       else if (rht_shrink_below_30(ht, tbl->size))
+       else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
                rhashtable_shrink(ht);
-unlock:
-       mutex_unlock(&ht->mutex);
-}
 
-static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-                               struct bucket_table *tbl,
-                               const struct bucket_table *old_tbl, u32 hash)
-{
-       bool no_resize_running = tbl == old_tbl;
-       struct rhash_head *head;
+       err = rhashtable_rehash_table(ht);
 
-       hash = rht_bucket_index(tbl, hash);
-       head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
-
-       ASSERT_BUCKET_LOCK(ht, tbl, hash);
-
-       if (rht_is_a_nulls(head))
-               INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
-       else
-               RCU_INIT_POINTER(obj->next, head);
-
-       rcu_assign_pointer(tbl->buckets[hash], obj);
+       mutex_unlock(&ht->mutex);
 
-       atomic_inc(&ht->nelems);
-       if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+       if (err)
                schedule_work(&ht->run_work);
 }
 
-/**
- * rhashtable_insert - insert object into hash table
- * @ht:                hash table
- * @obj:       pointer to hash head inside object
- *
- * Will take a per bucket spinlock to protect against mutual mutations
- * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
+static bool rhashtable_check_elasticity(struct rhashtable *ht,
+                                       struct bucket_table *tbl,
+                                       unsigned int hash)
 {
-       struct bucket_table *tbl, *old_tbl;
-       unsigned hash;
-
-       rcu_read_lock();
-
-       tbl = rht_dereference_rcu(ht->future_tbl, ht);
-       old_tbl = rht_dereference_rcu(ht->tbl, ht);
-       hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
+       unsigned int elasticity = ht->elasticity;
+       struct rhash_head *head;
 
-       lock_buckets(tbl, old_tbl, hash);
-       __rhashtable_insert(ht, obj, tbl, old_tbl, hash);
-       unlock_buckets(tbl, old_tbl, hash);
+       rht_for_each(head, tbl, hash)
+               if (!--elasticity)
+                       return true;
 
-       rcu_read_unlock();
+       return false;
 }
-EXPORT_SYMBOL_GPL(rhashtable_insert);
 
-/**
- * rhashtable_remove - remove object from hash table
- * @ht:                hash table
- * @obj:       pointer to hash head inside object
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * The caller must ensure that no concurrent table mutations occur. It is
- * however valid to have concurrent lookups if they are RCU protected.
- */
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
+int rhashtable_insert_rehash(struct rhashtable *ht)
 {
-       struct bucket_table *tbl, *new_tbl, *old_tbl;
-       struct rhash_head __rcu **pprev;
-       struct rhash_head *he, *he2;
-       unsigned int hash, new_hash;
-       bool ret = false;
+       struct bucket_table *old_tbl;
+       struct bucket_table *new_tbl;
+       struct bucket_table *tbl;
+       unsigned int size;
+       int err;
 
-       rcu_read_lock();
        old_tbl = rht_dereference_rcu(ht->tbl, ht);
-       tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-       new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
-
-       lock_buckets(new_tbl, old_tbl, new_hash);
-restart:
-       hash = rht_bucket_index(tbl, new_hash);
-       pprev = &tbl->buckets[hash];
-       rht_for_each(he, tbl, hash) {
-               if (he != obj) {
-                       pprev = &he->next;
-                       continue;
-               }
-
-               ASSERT_BUCKET_LOCK(ht, tbl, hash);
-
-               if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
-                   !rht_is_a_nulls(obj->next) &&
-                   head_hashfn(ht, tbl, obj->next) != hash) {
-                       rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
-               } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
-                       rht_for_each_continue(he2, obj->next, tbl, hash) {
-                               if (head_hashfn(ht, tbl, he2) == hash) {
-                                       rcu_assign_pointer(*pprev, he2);
-                                       goto found;
-                               }
-                       }
-
-                       rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
-               } else {
-                       rcu_assign_pointer(*pprev, obj->next);
-               }
-
-found:
-               ret = true;
-               break;
-       }
-
-       /* The entry may be linked in either 'tbl', 'future_tbl', or both.
-        * 'future_tbl' only exists for a short period of time during
-        * resizing. Thus traversing both is fine and the added cost is
-        * very rare.
-        */
-       if (tbl != old_tbl) {
-               tbl = old_tbl;
-               goto restart;
-       }
-
-       unlock_buckets(new_tbl, old_tbl, new_hash);
-
-       if (ret) {
-               bool no_resize_running = new_tbl == old_tbl;
-
-               atomic_dec(&ht->nelems);
-               if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
-                       schedule_work(&ht->run_work);
-       }
-
-       rcu_read_unlock();
+       tbl = rhashtable_last_table(ht, old_tbl);
 
-       return ret;
-}
-EXPORT_SYMBOL_GPL(rhashtable_remove);
-
-struct rhashtable_compare_arg {
-       struct rhashtable *ht;
-       const void *key;
-};
-
-static bool rhashtable_compare(void *ptr, void *arg)
-{
-       struct rhashtable_compare_arg *x = arg;
-       struct rhashtable *ht = x->ht;
-
-       return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
-}
-
-/**
- * rhashtable_lookup - lookup key in hash table
- * @ht:                hash table
- * @key:       pointer to key
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * This lookup function may only be used for fixed key hash table (key_len
- * parameter set). It will BUG() if used inappropriately.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- */
-void *rhashtable_lookup(struct rhashtable *ht, const void *key)
-{
-       struct rhashtable_compare_arg arg = {
-               .ht = ht,
-               .key = key,
-       };
-
-       BUG_ON(!ht->p.key_len);
-
-       return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup);
-
-/**
- * rhashtable_lookup_compare - search hash table with compare function
- * @ht:                hash table
- * @key:       the pointer to the key
- * @compare:   compare function, must return true on match
- * @arg:       argument passed on to compare function
- *
- * Traverses the bucket chain behind the provided hash value and calls the
- * specified compare function for each entry.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Returns the first entry on which the compare function returned true.
- */
-void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
-                               bool (*compare)(void *, void *), void *arg)
-{
-       const struct bucket_table *tbl, *old_tbl;
-       struct rhash_head *he;
-       u32 hash;
+       size = tbl->size;
 
-       rcu_read_lock();
+       if (rht_grow_above_75(ht, tbl))
+               size *= 2;
+       /* More than two rehashes (not resizes) detected. */
+       else if (WARN_ON(old_tbl != tbl && old_tbl->size == size))
+               return -EBUSY;
 
-       old_tbl = rht_dereference_rcu(ht->tbl, ht);
-       tbl = rht_dereference_rcu(ht->future_tbl, ht);
-       hash = key_hashfn(ht, key, ht->p.key_len);
-restart:
-       rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
-               if (!compare(rht_obj(ht, he), arg))
-                       continue;
-               rcu_read_unlock();
-               return rht_obj(ht, he);
-       }
+       new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
+       if (new_tbl == NULL)
+               return -ENOMEM;
 
-       if (unlikely(tbl != old_tbl)) {
-               tbl = old_tbl;
-               goto restart;
-       }
-       rcu_read_unlock();
+       err = rhashtable_rehash_attach(ht, tbl, new_tbl);
+       if (err) {
+               bucket_table_free(new_tbl);
+               if (err == -EEXIST)
+                       err = 0;
+       } else
+               schedule_work(&ht->run_work);
 
-       return NULL;
+       return err;
 }
-EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
+EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
 
-/**
- * rhashtable_lookup_insert - lookup and insert object into hash table
- * @ht:                hash table
- * @obj:       pointer to hash head inside object
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * This lookup function may only be used for fixed key hash table (key_len
- * parameter set). It will BUG() if used inappropriately.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
+int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+                          struct rhash_head *obj,
+                          struct bucket_table *tbl)
 {
-       struct rhashtable_compare_arg arg = {
-               .ht = ht,
-               .key = rht_obj(ht, obj) + ht->p.key_offset,
-       };
+       struct rhash_head *head;
+       unsigned int hash;
+       int err;
 
-       BUG_ON(!ht->p.key_len);
+       tbl = rhashtable_last_table(ht, tbl);
+       hash = head_hashfn(ht, tbl, obj);
+       spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
 
-       return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
-                                               &arg);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
+       err = -EEXIST;
+       if (key && rhashtable_lookup_fast(ht, key, ht->p))
+               goto exit;
 
-/**
- * rhashtable_lookup_compare_insert - search and insert object to hash table
- *                                    with compare function
- * @ht:                hash table
- * @obj:       pointer to hash head inside object
- * @compare:   compare function, must return true on match
- * @arg:       argument passed on to compare function
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
-                                     struct rhash_head *obj,
-                                     bool (*compare)(void *, void *),
-                                     void *arg)
-{
-       struct bucket_table *new_tbl, *old_tbl;
-       u32 new_hash;
-       bool success = true;
+       err = -EAGAIN;
+       if (rhashtable_check_elasticity(ht, tbl, hash) ||
+           rht_grow_above_100(ht, tbl))
+               goto exit;
 
-       BUG_ON(!ht->p.key_len);
+       err = 0;
 
-       rcu_read_lock();
-       old_tbl = rht_dereference_rcu(ht->tbl, ht);
-       new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-       new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
+       head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
 
-       lock_buckets(new_tbl, old_tbl, new_hash);
+       RCU_INIT_POINTER(obj->next, head);
 
-       if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
-                                     compare, arg)) {
-               success = false;
-               goto exit;
-       }
+       rcu_assign_pointer(tbl->buckets[hash], obj);
 
-       __rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
+       atomic_inc(&ht->nelems);
 
 exit:
-       unlock_buckets(new_tbl, old_tbl, new_hash);
-       rcu_read_unlock();
+       spin_unlock(rht_bucket_lock(tbl, hash));
 
-       return success;
+       return err;
 }
-EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
+EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
 
 /**
  * rhashtable_walk_init - Initialise an iterator
@@ -887,11 +495,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
        if (!iter->walker)
                return -ENOMEM;
 
-       INIT_LIST_HEAD(&iter->walker->list);
-       iter->walker->resize = false;
-
        mutex_lock(&ht->mutex);
-       list_add(&iter->walker->list, &ht->walkers);
+       iter->walker->tbl = rht_dereference(ht->tbl, ht);
+       list_add(&iter->walker->list, &iter->walker->tbl->walkers);
        mutex_unlock(&ht->mutex);
 
        return 0;
@@ -907,7 +513,8 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
 void rhashtable_walk_exit(struct rhashtable_iter *iter)
 {
        mutex_lock(&iter->ht->mutex);
-       list_del(&iter->walker->list);
+       if (iter->walker->tbl)
+               list_del(&iter->walker->list);
        mutex_unlock(&iter->ht->mutex);
        kfree(iter->walker);
 }
@@ -928,13 +535,21 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
  * by calling rhashtable_walk_next.
  */
 int rhashtable_walk_start(struct rhashtable_iter *iter)
+       __acquires(RCU)
 {
+       struct rhashtable *ht = iter->ht;
+
+       mutex_lock(&ht->mutex);
+
+       if (iter->walker->tbl)
+               list_del(&iter->walker->list);
+
        rcu_read_lock();
 
-       if (iter->walker->resize) {
-               iter->slot = 0;
-               iter->skip = 0;
-               iter->walker->resize = false;
+       mutex_unlock(&ht->mutex);
+
+       if (!iter->walker->tbl) {
+               iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
                return -EAGAIN;
        }
 
@@ -956,13 +571,11 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start);
  */
 void *rhashtable_walk_next(struct rhashtable_iter *iter)
 {
-       const struct bucket_table *tbl;
+       struct bucket_table *tbl = iter->walker->tbl;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        void *obj = NULL;
 
-       tbl = rht_dereference_rcu(ht->tbl, ht);
-
        if (p) {
                p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
                goto next;
@@ -988,17 +601,20 @@ next:
                iter->skip = 0;
        }
 
-       iter->p = NULL;
+       /* Ensure we see any new tables. */
+       smp_rmb();
 
-out:
-       if (iter->walker->resize) {
-               iter->p = NULL;
+       iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+       if (iter->walker->tbl) {
                iter->slot = 0;
                iter->skip = 0;
-               iter->walker->resize = false;
                return ERR_PTR(-EAGAIN);
        }
 
+       iter->p = NULL;
+
+out:
+
        return obj;
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
@@ -1010,16 +626,39 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_next);
  * Finish a hash table walk.
  */
 void rhashtable_walk_stop(struct rhashtable_iter *iter)
+       __releases(RCU)
 {
-       rcu_read_unlock();
+       struct rhashtable *ht;
+       struct bucket_table *tbl = iter->walker->tbl;
+
+       if (!tbl)
+               goto out;
+
+       ht = iter->ht;
+
+       spin_lock(&ht->lock);
+       if (tbl->rehash < tbl->size)
+               list_add(&iter->walker->list, &tbl->walkers);
+       else
+               iter->walker->tbl = NULL;
+       spin_unlock(&ht->lock);
+
        iter->p = NULL;
+
+out:
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
 
-static size_t rounded_hashtable_size(struct rhashtable_params *params)
+static size_t rounded_hashtable_size(const struct rhashtable_params *params)
 {
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
-                  1UL << params->min_shift);
+                  (unsigned long)params->min_size);
+}
+
+static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
+{
+       return jhash2(key, length, seed);
 }
 
 /**
@@ -1052,7 +691,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
  *     struct rhash_head       node;
  * };
  *
- * u32 my_hash_fn(const void *data, u32 seed)
+ * u32 my_hash_fn(const void *data, u32 len, u32 seed)
  * {
  *     struct test_obj *obj = data;
  *
@@ -1065,47 +704,74 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
  *     .obj_hashfn = my_hash_fn,
  * };
  */
-int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
+int rhashtable_init(struct rhashtable *ht,
+                   const struct rhashtable_params *params)
 {
        struct bucket_table *tbl;
        size_t size;
 
        size = HASH_DEFAULT_SIZE;
 
-       if ((params->key_len && !params->hashfn) ||
-           (!params->key_len && !params->obj_hashfn))
+       if ((!params->key_len && !params->obj_hashfn) ||
+           (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;
 
        if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
                return -EINVAL;
 
-       params->min_shift = max_t(size_t, params->min_shift,
-                                 ilog2(HASH_MIN_SIZE));
-
        if (params->nelem_hint)
                size = rounded_hashtable_size(params);
 
        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
+       spin_lock_init(&ht->lock);
        memcpy(&ht->p, params, sizeof(*params));
-       INIT_LIST_HEAD(&ht->walkers);
+
+       if (params->min_size)
+               ht->p.min_size = roundup_pow_of_two(params->min_size);
+
+       if (params->max_size)
+               ht->p.max_size = rounddown_pow_of_two(params->max_size);
+
+       ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
+
+       /* The maximum (not average) chain length grows with the
+        * size of the hash table, at a rate of (log N)/(log log N).
+        * The value of 16 is selected so that even if the hash
+        * table grew to 2^32 you would not expect the maximum
+        * chain length to exceed it unless we are under attack
+        * (or extremely unlucky).
+        *
+        * As this limit is only to detect attacks, we don't need
+        * to set it to a lower value as you'd need the chain
+        * length to vastly exceed 16 to have any real effect
+        * on the system.
+        */
+       if (!params->insecure_elasticity)
+               ht->elasticity = 16;
 
        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
        else
                ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
 
-       tbl = bucket_table_alloc(ht, size);
+       ht->key_len = ht->p.key_len;
+       if (!params->hashfn) {
+               ht->p.hashfn = jhash;
+
+               if (!(ht->key_len & (sizeof(u32) - 1))) {
+                       ht->key_len /= sizeof(u32);
+                       ht->p.hashfn = rhashtable_jhash2;
+               }
+       }
+
+       tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (tbl == NULL)
                return -ENOMEM;
 
        atomic_set(&ht->nelems, 0);
-       atomic_set(&ht->shift, ilog2(tbl->size));
-       RCU_INIT_POINTER(ht->tbl, tbl);
-       RCU_INIT_POINTER(ht->future_tbl, tbl);
 
-       if (!ht->p.hash_rnd)
-               get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
+       RCU_INIT_POINTER(ht->tbl, tbl);
 
        INIT_WORK(&ht->run_work, rht_deferred_worker);
 
@@ -1114,21 +780,53 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 EXPORT_SYMBOL_GPL(rhashtable_init);
 
 /**
- * rhashtable_destroy - destroy hash table
+ * rhashtable_free_and_destroy - free elements and destroy hash table
  * @ht:                the hash table to destroy
+ * @free_fn:   callback to release resources of element
+ * @arg:       pointer passed to free_fn
+ *
+ * Stops an eventual async resize. If defined, invokes free_fn for each
+ * element to releasal resources. Please note that RCU protected
+ * readers may still be accessing the elements. Releasing of resources
+ * must occur in a compatible manner. Then frees the bucket array.
  *
- * Frees the bucket array. This function is not rcu safe, therefore the caller
- * has to make sure that no resizing may happen by unpublishing the hashtable
- * and waiting for the quiescent cycle before releasing the bucket array.
+ * This function will eventually sleep to wait for an async resize
+ * to complete. The caller is responsible that no further write operations
+ * occurs in parallel.
  */
-void rhashtable_destroy(struct rhashtable *ht)
+void rhashtable_free_and_destroy(struct rhashtable *ht,
+                                void (*free_fn)(void *ptr, void *arg),
+                                void *arg)
 {
-       ht->being_destroyed = true;
+       const struct bucket_table *tbl;
+       unsigned int i;
 
        cancel_work_sync(&ht->run_work);
 
        mutex_lock(&ht->mutex);
-       bucket_table_free(rht_dereference(ht->tbl, ht));
+       tbl = rht_dereference(ht->tbl, ht);
+       if (free_fn) {
+               for (i = 0; i < tbl->size; i++) {
+                       struct rhash_head *pos, *next;
+
+                       for (pos = rht_dereference(tbl->buckets[i], ht),
+                            next = !rht_is_a_nulls(pos) ?
+                                       rht_dereference(pos->next, ht) : NULL;
+                            !rht_is_a_nulls(pos);
+                            pos = next,
+                            next = !rht_is_a_nulls(pos) ?
+                                       rht_dereference(pos->next, ht) : NULL)
+                               free_fn(rht_obj(ht, pos), arg);
+               }
+       }
+
+       bucket_table_free(tbl);
        mutex_unlock(&ht->mutex);
 }
+EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
+
+void rhashtable_destroy(struct rhashtable *ht)
+{
+       return rhashtable_free_and_destroy(ht, NULL, NULL);
+}
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
index 1df191e04a24c800f59759f4445d799822f5d9e9..5a56dfd7b99de98ca47f75ab528dc8e3196efd3e 100644 (file)
@@ -198,3 +198,4 @@ void sha_init(__u32 *buf)
        buf[3] = 0x10325476;
        buf[4] = 0xc3d2e1f0;
 }
+EXPORT_SYMBOL(sha_init);
index 67c7593d1dd69c91f646e21e47b589c40c808837..b2957540d3c722d5c7b3d9f94e5b5a1d6c9d7975 100644 (file)
@@ -38,6 +38,15 @@ struct test_obj {
        struct rhash_head       node;
 };
 
+static const struct rhashtable_params test_rht_params = {
+       .nelem_hint = TEST_HT_SIZE,
+       .head_offset = offsetof(struct test_obj, node),
+       .key_offset = offsetof(struct test_obj, value),
+       .key_len = sizeof(int),
+       .hashfn = jhash,
+       .nulls_base = (3U << RHT_BASE_SHIFT),
+};
+
 static int __init test_rht_lookup(struct rhashtable *ht)
 {
        unsigned int i;
@@ -47,7 +56,7 @@ static int __init test_rht_lookup(struct rhashtable *ht)
                bool expected = !(i % 2);
                u32 key = i;
 
-               obj = rhashtable_lookup(ht, &key);
+               obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
 
                if (expected && !obj) {
                        pr_warn("Test failed: Could not find key %u\n", key);
@@ -80,7 +89,7 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet)
                rcu_cnt = cnt = 0;
 
                if (!quiet)
-                       pr_info(" [%#4x/%zu]", i, tbl->size);
+                       pr_info(" [%#4x/%u]", i, tbl->size);
 
                rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
                        cnt++;
@@ -133,7 +142,11 @@ static int __init test_rhashtable(struct rhashtable *ht)
                obj->ptr = TEST_PTR;
                obj->value = i * 2;
 
-               rhashtable_insert(ht, &obj->node);
+               err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
+               if (err) {
+                       kfree(obj);
+                       goto error;
+               }
        }
 
        rcu_read_lock();
@@ -141,30 +154,6 @@ static int __init test_rhashtable(struct rhashtable *ht)
        test_rht_lookup(ht);
        rcu_read_unlock();
 
-       for (i = 0; i < TEST_NEXPANDS; i++) {
-               pr_info("  Table expansion iteration %u...\n", i);
-               mutex_lock(&ht->mutex);
-               rhashtable_expand(ht);
-               mutex_unlock(&ht->mutex);
-
-               rcu_read_lock();
-               pr_info("  Verifying lookups...\n");
-               test_rht_lookup(ht);
-               rcu_read_unlock();
-       }
-
-       for (i = 0; i < TEST_NEXPANDS; i++) {
-               pr_info("  Table shrinkage iteration %u...\n", i);
-               mutex_lock(&ht->mutex);
-               rhashtable_shrink(ht);
-               mutex_unlock(&ht->mutex);
-
-               rcu_read_lock();
-               pr_info("  Verifying lookups...\n");
-               test_rht_lookup(ht);
-               rcu_read_unlock();
-       }
-
        rcu_read_lock();
        test_bucket_stats(ht, true);
        rcu_read_unlock();
@@ -173,10 +162,10 @@ static int __init test_rhashtable(struct rhashtable *ht)
        for (i = 0; i < TEST_ENTRIES; i++) {
                u32 key = i * 2;
 
-               obj = rhashtable_lookup(ht, &key);
+               obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
                BUG_ON(!obj);
 
-               rhashtable_remove(ht, &obj->node);
+               rhashtable_remove_fast(ht, &obj->node, test_rht_params);
                kfree(obj);
        }
 
@@ -195,20 +184,11 @@ static struct rhashtable ht;
 
 static int __init test_rht_init(void)
 {
-       struct rhashtable_params params = {
-               .nelem_hint = TEST_HT_SIZE,
-               .head_offset = offsetof(struct test_obj, node),
-               .key_offset = offsetof(struct test_obj, value),
-               .key_len = sizeof(int),
-               .hashfn = jhash,
-               .max_shift = 1, /* we expand/shrink manually here */
-               .nulls_base = (3U << RHT_BASE_SHIFT),
-       };
        int err;
 
        pr_info("Running resizable hashtable tests...\n");
 
-       err = rhashtable_init(&ht, &params);
+       err = rhashtable_init(&ht, &test_rht_params);
        if (err < 0) {
                pr_warn("Test failed: Unable to initialize hashtable: %d\n",
                        err);
index ad7242043bdb8b74872e536b61d01ca05a1de6b3..dfc573c6ec25d3530570752cc1062e42fd345752 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/compiler.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
-#include <linux/aio.h>
 #include <linux/capability.h>
 #include <linux/kernel_stat.h>
 #include <linux/gfp.h>
@@ -1708,7 +1707,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                                        pos + count - 1);
                if (!retval) {
                        struct iov_iter data = *iter;
-                       retval = mapping->a_ops->direct_IO(READ, iocb, &data, pos);
+                       retval = mapping->a_ops->direct_IO(iocb, &data, pos);
                }
 
                if (retval > 0) {
@@ -2261,7 +2260,7 @@ EXPORT_SYMBOL(read_cache_page_gfp);
  * Returns appropriate error code that caller should return or
  * zero in case that write should be allowed.
  */
-inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
+inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count)
 {
        struct inode *inode = file->f_mapping->host;
        unsigned long limit = rlimit(RLIMIT_FSIZE);
@@ -2269,20 +2268,17 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
         if (unlikely(*pos < 0))
                 return -EINVAL;
 
-       if (!isblk) {
-               /* FIXME: this is for backwards compatibility with 2.4 */
-               if (file->f_flags & O_APPEND)
-                        *pos = i_size_read(inode);
+       /* FIXME: this is for backwards compatibility with 2.4 */
+       if (file->f_flags & O_APPEND)
+               *pos = i_size_read(inode);
 
-               if (limit != RLIM_INFINITY) {
-                       if (*pos >= limit) {
-                               send_sig(SIGXFSZ, current, 0);
-                               return -EFBIG;
-                       }
-                       if (*count > limit - (typeof(limit))*pos) {
-                               *count = limit - (typeof(limit))*pos;
-                       }
+       if (limit != RLIM_INFINITY) {
+               if (*pos >= limit) {
+                       send_sig(SIGXFSZ, current, 0);
+                       return -EFBIG;
                }
+               if (*count > limit - (typeof(limit))*pos)
+                       *count = limit - (typeof(limit))*pos;
        }
 
        /*
@@ -2290,12 +2286,10 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
         */
        if (unlikely(*pos + *count > MAX_NON_LFS &&
                                !(file->f_flags & O_LARGEFILE))) {
-               if (*pos >= MAX_NON_LFS) {
+               if (*pos >= MAX_NON_LFS)
                        return -EFBIG;
-               }
-               if (*count > MAX_NON_LFS - (unsigned long)*pos) {
+               if (*count > MAX_NON_LFS - (unsigned long)*pos)
                        *count = MAX_NON_LFS - (unsigned long)*pos;
-               }
        }
 
        /*
@@ -2305,33 +2299,15 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
         * exceeded without writing data we send a signal and return EFBIG.
         * Linus frestrict idea will clean these up nicely..
         */
-       if (likely(!isblk)) {
-               if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
-                       if (*count || *pos > inode->i_sb->s_maxbytes) {
-                               return -EFBIG;
-                       }
-                       /* zero-length writes at ->s_maxbytes are OK */
-               }
-
-               if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
-                       *count = inode->i_sb->s_maxbytes - *pos;
-       } else {
-#ifdef CONFIG_BLOCK
-               loff_t isize;
-               if (bdev_read_only(I_BDEV(inode)))
-                       return -EPERM;
-               isize = i_size_read(inode);
-               if (*pos >= isize) {
-                       if (*count || *pos > isize)
-                               return -ENOSPC;
+       if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
+               if (*count || *pos > inode->i_sb->s_maxbytes) {
+                       return -EFBIG;
                }
-
-               if (*pos + *count > isize)
-                       *count = isize - *pos;
-#else
-               return -EPERM;
-#endif
+               /* zero-length writes at ->s_maxbytes are OK */
        }
+
+       if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
+               *count = inode->i_sb->s_maxbytes - *pos;
        return 0;
 }
 EXPORT_SYMBOL(generic_write_checks);
@@ -2396,7 +2372,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
        }
 
        data = *from;
-       written = mapping->a_ops->direct_IO(WRITE, iocb, &data, pos);
+       written = mapping->a_ops->direct_IO(iocb, &data, pos);
 
        /*
         * Finally, try again to invalidate clean pages which might have been
@@ -2558,23 +2534,12 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct file *file = iocb->ki_filp;
        struct address_space * mapping = file->f_mapping;
        struct inode    *inode = mapping->host;
-       loff_t          pos = iocb->ki_pos;
        ssize_t         written = 0;
        ssize_t         err;
        ssize_t         status;
-       size_t          count = iov_iter_count(from);
 
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);
-       err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-       if (err)
-               goto out;
-
-       if (count == 0)
-               goto out;
-
-       iov_iter_truncate(from, count);
-
        err = file_remove_suid(file);
        if (err)
                goto out;
@@ -2584,9 +2549,9 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                goto out;
 
        if (io_is_direct(file)) {
-               loff_t endbyte;
+               loff_t pos, endbyte;
 
-               written = generic_file_direct_write(iocb, from, pos);
+               written = generic_file_direct_write(iocb, from, iocb->ki_pos);
                /*
                 * If the write stopped short of completing, fall back to
                 * buffered writes.  Some filesystems do this for writes to
@@ -2594,13 +2559,10 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                 * not succeed (even if it did, DAX does not handle dirty
                 * page-cache pages correctly).
                 */
-               if (written < 0 || written == count || IS_DAX(inode))
+               if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
                        goto out;
 
-               pos += written;
-               count -= written;
-
-               status = generic_perform_write(file, from, pos);
+               status = generic_perform_write(file, from, pos = iocb->ki_pos);
                /*
                 * If generic_perform_write() returned a synchronous error
                 * then we want to return the number of bytes which were
@@ -2612,15 +2574,15 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                        err = status;
                        goto out;
                }
-               iocb->ki_pos = pos + status;
                /*
                 * We need to ensure that the page cache pages are written to
                 * disk and invalidated to preserve the expected O_DIRECT
                 * semantics.
                 */
                endbyte = pos + status - 1;
-               err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
+               err = filemap_write_and_wait_range(mapping, pos, endbyte);
                if (err == 0) {
+                       iocb->ki_pos = endbyte + 1;
                        written += status;
                        invalidate_mapping_pages(mapping,
                                                 pos >> PAGE_CACHE_SHIFT,
@@ -2632,9 +2594,9 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                         */
                }
        } else {
-               written = generic_perform_write(file, from, pos);
-               if (likely(written >= 0))
-                       iocb->ki_pos = pos + written;
+               written = generic_perform_write(file, from, iocb->ki_pos);
+               if (likely(written > 0))
+                       iocb->ki_pos += written;
        }
 out:
        current->backing_dev_info = NULL;
@@ -2656,9 +2618,14 @@ ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
+       size_t count = iov_iter_count(from);
 
        mutex_lock(&inode->i_mutex);
-       ret = __generic_file_write_iter(iocb, from);
+       ret = generic_write_checks(file, &iocb->ki_pos, &count);
+       if (!ret && count) {
+               iov_iter_truncate(from, count);
+               ret = __generic_file_write_iter(iocb, from);
+       }
        mutex_unlock(&inode->i_mutex);
 
        if (ret > 0) {
index 3fba2dc97c44bece0d6fb5754afdafb1dba6353e..e544508e2a4bc3e3dfa6190abdd3faf0616c95f2 100644 (file)
@@ -1016,7 +1016,7 @@ static int validate_mmap_request(struct file *file,
                 * device */
                if (!file->f_op->get_unmapped_area)
                        capabilities &= ~NOMMU_MAP_DIRECT;
-               if (!file->f_op->read)
+               if (!(file->f_mode & FMODE_CAN_READ))
                        capabilities &= ~NOMMU_MAP_COPY;
 
                /* The file shall have been opened with read permission. */
@@ -1240,7 +1240,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 
                old_fs = get_fs();
                set_fs(KERNEL_DS);
-               ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
+               ret = __vfs_read(vma->vm_file, base, len, &fpos);
                set_fs(old_fs);
 
                if (ret < 0)
index e6045804c8d876db5c480d6c64e3c5f4e7bb7a84..6424869e275e2aa2d09debfa791ea08302ac68be 100644 (file)
@@ -20,8 +20,8 @@
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
 #include <linux/frontswap.h>
-#include <linux/aio.h>
 #include <linux/blkdev.h>
+#include <linux/uio.h>
 #include <asm/pgtable.h>
 
 static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -274,13 +274,10 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
                iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE);
                init_sync_kiocb(&kiocb, swap_file);
                kiocb.ki_pos = page_file_offset(page);
-               kiocb.ki_nbytes = PAGE_SIZE;
 
                set_page_writeback(page);
                unlock_page(page);
-               ret = mapping->a_ops->direct_IO(ITER_BVEC | WRITE,
-                                               &kiocb, &from,
-                                               kiocb.ki_pos);
+               ret = mapping->a_ops->direct_IO(&kiocb, &from, kiocb.ki_pos);
                if (ret == PAGE_SIZE) {
                        count_vm_event(PSWPOUT);
                        ret = 0;
index b1597690530ce84644d8e405dab02740298706cd..e88d071648c2dece38b25d3fc8e57091d1fcd1d1 100644 (file)
@@ -257,22 +257,18 @@ static ssize_t process_vm_rw(pid_t pid,
        struct iovec *iov_r = iovstack_r;
        struct iov_iter iter;
        ssize_t rc;
+       int dir = vm_write ? WRITE : READ;
 
        if (flags != 0)
                return -EINVAL;
 
        /* Check iovecs */
-       if (vm_write)
-               rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
-                                          iovstack_l, &iov_l);
-       else
-               rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
-                                          iovstack_l, &iov_l);
-       if (rc <= 0)
+       rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
+       if (rc < 0)
+               return rc;
+       if (!iov_iter_count(&iter))
                goto free_iovecs;
 
-       iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);
-
        rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
                                   iovstack_r, &iov_r);
        if (rc <= 0)
@@ -283,8 +279,7 @@ static ssize_t process_vm_rw(pid_t pid,
 free_iovecs:
        if (iov_r != iovstack_r)
                kfree(iov_r);
-       if (iov_l != iovstack_l)
-               kfree(iov_l);
+       kfree(iov_l);
 
        return rc;
 }
@@ -320,21 +315,16 @@ compat_process_vm_rw(compat_pid_t pid,
        struct iovec *iov_r = iovstack_r;
        struct iov_iter iter;
        ssize_t rc = -EFAULT;
+       int dir = vm_write ? WRITE : READ;
 
        if (flags != 0)
                return -EINVAL;
 
-       if (vm_write)
-               rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
-                                                 UIO_FASTIOV, iovstack_l,
-                                                 &iov_l);
-       else
-               rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
-                                                 UIO_FASTIOV, iovstack_l,
-                                                 &iov_l);
-       if (rc <= 0)
+       rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
+       if (rc < 0)
+               return rc;
+       if (!iov_iter_count(&iter))
                goto free_iovecs;
-       iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);
        rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
                                          UIO_FASTIOV, iovstack_r,
                                          &iov_r);
@@ -346,8 +336,7 @@ compat_process_vm_rw(compat_pid_t pid,
 free_iovecs:
        if (iov_r != iovstack_r)
                kfree(iov_r);
-       if (iov_l != iovstack_l)
-               kfree(iov_l);
+       kfree(iov_l);
        return rc;
 }
 
index cf2d0ca010bc52efd5ea86c7f6ba760a5c3ef286..1ea2400b52450fccdc526d569d1447657379609d 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/swap.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
 
 static struct vfsmount *shm_mnt;
 
@@ -3118,8 +3118,6 @@ static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
 #ifdef CONFIG_TMPFS
        .llseek         = shmem_file_llseek,
-       .read           = new_sync_read,
-       .write          = new_sync_write,
        .read_iter      = shmem_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .fsync          = noop_fsync,
index e4a02ef551020ebb222a34ae1188430613ab2710..7fa0f382e7d1289725a76b98d3eecf70ccc6a1b6 100644 (file)
@@ -1,6 +1,61 @@
-config 6LOWPAN
+menuconfig 6LOWPAN
        tristate "6LoWPAN Support"
        depends on IPV6
        ---help---
          This enables IPv6 over Low power Wireless Personal Area Network -
          "6LoWPAN" which is supported by IEEE 802.15.4 or Bluetooth stacks.
+
+menuconfig 6LOWPAN_NHC
+       tristate "Next Header Compression Support"
+       depends on 6LOWPAN
+       default y
+       ---help---
+         Support for next header compression.
+
+if 6LOWPAN_NHC
+
+config 6LOWPAN_NHC_DEST
+       tristate "Destination Options Header Support"
+       default y
+       ---help---
+         6LoWPAN IPv6 Destination Options Header compression according to
+         RFC6282.
+
+config 6LOWPAN_NHC_FRAGMENT
+       tristate "Fragment Header Support"
+       default y
+       ---help---
+         6LoWPAN IPv6 Fragment Header compression according to RFC6282.
+
+config 6LOWPAN_NHC_HOP
+       tristate "Hop-by-Hop Options Header Support"
+       default y
+       ---help---
+         6LoWPAN IPv6 Hop-by-Hop Options Header compression according to
+         RFC6282.
+
+config 6LOWPAN_NHC_IPV6
+       tristate "IPv6 Header Support"
+       default y
+       ---help---
+         6LoWPAN IPv6 Header compression according to RFC6282.
+
+config 6LOWPAN_NHC_MOBILITY
+       tristate "Mobility Header Support"
+       default y
+       ---help---
+         6LoWPAN IPv6 Mobility Header compression according to RFC6282.
+
+config 6LOWPAN_NHC_ROUTING
+       tristate "Routing Header Support"
+       default y
+       ---help---
+         6LoWPAN IPv6 Routing Header compression according to RFC6282.
+
+config 6LOWPAN_NHC_UDP
+       tristate "UDP Header Support"
+       default y
+       ---help---
+         6LoWPAN IPv6 UDP Header compression according to RFC6282.
+
+endif
index 415886bb456a3fa4f908d03e502c08fdca9a829c..eb8baa72adc8ab2c84c390a54bcfa4db429d4c0d 100644 (file)
@@ -1,3 +1,12 @@
-obj-$(CONFIG_6LOWPAN) := 6lowpan.o
+obj-$(CONFIG_6LOWPAN) += 6lowpan.o
 
-6lowpan-y := iphc.o
+6lowpan-y := iphc.o nhc.o
+
+#rfc6282 nhcs
+obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o
+obj-$(CONFIG_6LOWPAN_NHC_FRAGMENT) += nhc_fragment.o
+obj-$(CONFIG_6LOWPAN_NHC_HOP) += nhc_hop.o
+obj-$(CONFIG_6LOWPAN_NHC_IPV6) += nhc_ipv6.o
+obj-$(CONFIG_6LOWPAN_NHC_MOBILITY) += nhc_mobility.o
+obj-$(CONFIG_6LOWPAN_NHC_ROUTING) += nhc_routing.o
+obj-$(CONFIG_6LOWPAN_NHC_UDP) += nhc_udp.o
index 32ffec6ef1643427513529f33086e89fb751f991..94a375c04f21cd5a9b525f7dffb4a14657deea89 100644 (file)
@@ -54,6 +54,8 @@
 #include <net/ipv6.h>
 #include <net/af_ieee802154.h>
 
+#include "nhc.h"
+
 /* Uncompress address function for source and
  * destination address(non-multicast).
  *
@@ -224,77 +226,6 @@ static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
        return 0;
 }
 
-static int uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
-{
-       bool fail;
-       u8 tmp = 0, val = 0;
-
-       fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));
-
-       if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
-               pr_debug("UDP header uncompression\n");
-               switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
-               case LOWPAN_NHC_UDP_CS_P_00:
-                       fail |= lowpan_fetch_skb(skb, &uh->source,
-                                                sizeof(uh->source));
-                       fail |= lowpan_fetch_skb(skb, &uh->dest,
-                                                sizeof(uh->dest));
-                       break;
-               case LOWPAN_NHC_UDP_CS_P_01:
-                       fail |= lowpan_fetch_skb(skb, &uh->source,
-                                                sizeof(uh->source));
-                       fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
-                       uh->dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
-                       break;
-               case LOWPAN_NHC_UDP_CS_P_10:
-                       fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
-                       uh->source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
-                       fail |= lowpan_fetch_skb(skb, &uh->dest,
-                                                sizeof(uh->dest));
-                       break;
-               case LOWPAN_NHC_UDP_CS_P_11:
-                       fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
-                       uh->source = htons(LOWPAN_NHC_UDP_4BIT_PORT +
-                                          (val >> 4));
-                       uh->dest = htons(LOWPAN_NHC_UDP_4BIT_PORT +
-                                        (val & 0x0f));
-                       break;
-               default:
-                       pr_debug("ERROR: unknown UDP format\n");
-                       goto err;
-               }
-
-               pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
-                        ntohs(uh->source), ntohs(uh->dest));
-
-               /* checksum */
-               if (tmp & LOWPAN_NHC_UDP_CS_C) {
-                       pr_debug_ratelimited("checksum elided currently not supported\n");
-                       goto err;
-               } else {
-                       fail |= lowpan_fetch_skb(skb, &uh->check,
-                                                sizeof(uh->check));
-               }
-
-               /* UDP length needs to be infered from the lower layers
-                * here, we obtain the hint from the remaining size of the
-                * frame
-                */
-               uh->len = htons(skb->len + sizeof(struct udphdr));
-               pr_debug("uncompressed UDP length: src = %d", ntohs(uh->len));
-       } else {
-               pr_debug("ERROR: unsupported NH format\n");
-               goto err;
-       }
-
-       if (fail)
-               goto err;
-
-       return 0;
-err:
-       return -EINVAL;
-}
-
 /* TTL uncompression values */
 static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
 
@@ -425,29 +356,11 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
                        return -EINVAL;
        }
 
-       /* UDP data uncompression */
+       /* Next header data uncompression */
        if (iphc0 & LOWPAN_IPHC_NH_C) {
-               struct udphdr uh;
-               const int needed = sizeof(struct udphdr) + sizeof(hdr);
-
-               if (uncompress_udp_header(skb, &uh))
-                       return -EINVAL;
-
-               /* replace the compressed UDP head by the uncompressed UDP
-                * header
-                */
-               err = skb_cow(skb, needed);
-               if (unlikely(err))
+               err = lowpan_nhc_do_uncompression(skb, dev, &hdr);
+               if (err < 0)
                        return err;
-
-               skb_push(skb, sizeof(struct udphdr));
-               skb_reset_transport_header(skb);
-               skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
-
-               raw_dump_table(__func__, "raw UDP header dump",
-                              (u8 *)&uh, sizeof(uh));
-
-               hdr.nexthdr = UIP_PROTO_UDP;
        } else {
                err = skb_cow(skb, sizeof(hdr));
                if (unlikely(err))
@@ -500,71 +413,6 @@ static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift,
        return rol8(val, shift);
 }
 
-static void compress_udp_header(u8 **hc_ptr, struct sk_buff *skb)
-{
-       struct udphdr *uh;
-       u8 tmp;
-
-       /* In the case of RAW sockets the transport header is not set by
-        * the ip6 stack so we must set it ourselves
-        */
-       if (skb->transport_header == skb->network_header)
-               skb_set_transport_header(skb, sizeof(struct ipv6hdr));
-
-       uh = udp_hdr(skb);
-
-       if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) ==
-            LOWPAN_NHC_UDP_4BIT_PORT) &&
-           ((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) ==
-            LOWPAN_NHC_UDP_4BIT_PORT)) {
-               pr_debug("UDP header: both ports compression to 4 bits\n");
-               /* compression value */
-               tmp = LOWPAN_NHC_UDP_CS_P_11;
-               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
-               /* source and destination port */
-               tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT +
-                     ((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4);
-               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
-       } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) ==
-                       LOWPAN_NHC_UDP_8BIT_PORT) {
-               pr_debug("UDP header: remove 8 bits of dest\n");
-               /* compression value */
-               tmp = LOWPAN_NHC_UDP_CS_P_01;
-               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
-               /* source port */
-               lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
-               /* destination port */
-               tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT;
-               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
-       } else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) ==
-                       LOWPAN_NHC_UDP_8BIT_PORT) {
-               pr_debug("UDP header: remove 8 bits of source\n");
-               /* compression value */
-               tmp = LOWPAN_NHC_UDP_CS_P_10;
-               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
-               /* source port */
-               tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT;
-               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
-               /* destination port */
-               lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
-       } else {
-               pr_debug("UDP header: can't compress\n");
-               /* compression value */
-               tmp = LOWPAN_NHC_UDP_CS_P_00;
-               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
-               /* source port */
-               lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
-               /* destination port */
-               lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
-       }
-
-       /* checksum is always inline */
-       lowpan_push_hc_data(hc_ptr, &uh->check, sizeof(uh->check));
-
-       /* skip the UDP header */
-       skb_pull(skb, sizeof(struct udphdr));
-}
-
 int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
                           unsigned short type, const void *_daddr,
                           const void *_saddr, unsigned int len)
@@ -572,7 +420,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
        u8 tmp, iphc0, iphc1, *hc_ptr;
        struct ipv6hdr *hdr;
        u8 head[100] = {};
-       int addr_type;
+       int ret, addr_type;
 
        if (type != ETH_P_IPV6)
                return -EINVAL;
@@ -649,13 +497,12 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
 
        /* NOTE: payload length is always compressed */
 
-       /* Next Header is compress if UDP */
-       if (hdr->nexthdr == UIP_PROTO_UDP)
-               iphc0 |= LOWPAN_IPHC_NH_C;
-
-       if ((iphc0 & LOWPAN_IPHC_NH_C) == 0)
-               lowpan_push_hc_data(&hc_ptr, &hdr->nexthdr,
-                                   sizeof(hdr->nexthdr));
+       /* Check if we provide the nhc format for nexthdr and compression
+        * functionality. If not nexthdr is handled inline and not compressed.
+        */
+       ret = lowpan_nhc_check_compression(skb, hdr, &hc_ptr, &iphc0);
+       if (ret < 0)
+               return ret;
 
        /* Hop limit
         * if 1:   compress, encoding is 01
@@ -741,9 +588,12 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
                }
        }
 
-       /* UDP header compression */
-       if (hdr->nexthdr == UIP_PROTO_UDP)
-               compress_udp_header(&hc_ptr, skb);
+       /* next header compression */
+       if (iphc0 & LOWPAN_IPHC_NH_C) {
+               ret = lowpan_nhc_do_compression(skb, hdr, &hc_ptr);
+               if (ret < 0)
+                       return ret;
+       }
 
        head[0] = iphc0;
        head[1] = iphc1;
@@ -761,4 +611,18 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(lowpan_header_compress);
 
+static int __init lowpan_module_init(void)
+{
+       request_module_nowait("nhc_dest");
+       request_module_nowait("nhc_fragment");
+       request_module_nowait("nhc_hop");
+       request_module_nowait("nhc_ipv6");
+       request_module_nowait("nhc_mobility");
+       request_module_nowait("nhc_routing");
+       request_module_nowait("nhc_udp");
+
+       return 0;
+}
+module_init(lowpan_module_init);
+
 MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c
new file mode 100644 (file)
index 0000000..fd20fc5
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ *     6LoWPAN next header compression
+ *
+ *
+ *     Authors:
+ *     Alexander Aring         <aar@pengutronix.de>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/netdevice.h>
+
+#include <net/ipv6.h>
+
+#include "nhc.h"
+
+static struct rb_root rb_root = RB_ROOT;
+static struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX];
+static DEFINE_SPINLOCK(lowpan_nhc_lock);
+
+static int lowpan_nhc_insert(struct lowpan_nhc *nhc)
+{
+       struct rb_node **new = &rb_root.rb_node, *parent = NULL;
+
+       /* Figure out where to put new node */
+       while (*new) {
+               struct lowpan_nhc *this = container_of(*new, struct lowpan_nhc,
+                                                      node);
+               int result, len_dif, len;
+
+               len_dif = nhc->idlen - this->idlen;
+
+               if (nhc->idlen < this->idlen)
+                       len = nhc->idlen;
+               else
+                       len = this->idlen;
+
+               result = memcmp(nhc->id, this->id, len);
+               if (!result)
+                       result = len_dif;
+
+               parent = *new;
+               if (result < 0)
+                       new = &((*new)->rb_left);
+               else if (result > 0)
+                       new = &((*new)->rb_right);
+               else
+                       return -EEXIST;
+       }
+
+       /* Add new node and rebalance tree. */
+       rb_link_node(&nhc->node, parent, new);
+       rb_insert_color(&nhc->node, &rb_root);
+
+       return 0;
+}
+
+static void lowpan_nhc_remove(struct lowpan_nhc *nhc)
+{
+       rb_erase(&nhc->node, &rb_root);
+}
+
+static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
+{
+       struct rb_node *node = rb_root.rb_node;
+       const u8 *nhcid_skb_ptr = skb->data;
+
+       while (node) {
+               struct lowpan_nhc *nhc = container_of(node, struct lowpan_nhc,
+                                                     node);
+               u8 nhcid_skb_ptr_masked[LOWPAN_NHC_MAX_ID_LEN];
+               int result, i;
+
+               if (nhcid_skb_ptr + nhc->idlen > skb->data + skb->len)
+                       return NULL;
+
+               /* copy and mask afterwards the nhid value from skb */
+               memcpy(nhcid_skb_ptr_masked, nhcid_skb_ptr, nhc->idlen);
+               for (i = 0; i < nhc->idlen; i++)
+                       nhcid_skb_ptr_masked[i] &= nhc->idmask[i];
+
+               result = memcmp(nhcid_skb_ptr_masked, nhc->id, nhc->idlen);
+               if (result < 0)
+                       node = node->rb_left;
+               else if (result > 0)
+                       node = node->rb_right;
+               else
+                       return nhc;
+       }
+
+       return NULL;
+}
+
+int lowpan_nhc_check_compression(struct sk_buff *skb,
+                                const struct ipv6hdr *hdr, u8 **hc_ptr,
+                                u8 *iphc0)
+{
+       struct lowpan_nhc *nhc;
+
+       spin_lock_bh(&lowpan_nhc_lock);
+
+       nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
+       if (nhc && nhc->compress)
+               *iphc0 |= LOWPAN_IPHC_NH_C;
+       else
+               lowpan_push_hc_data(hc_ptr, &hdr->nexthdr,
+                                   sizeof(hdr->nexthdr));
+
+       spin_unlock_bh(&lowpan_nhc_lock);
+
+       return 0;
+}
+
+int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
+                             u8 **hc_ptr)
+{
+       int ret;
+       struct lowpan_nhc *nhc;
+
+       spin_lock_bh(&lowpan_nhc_lock);
+
+       nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
+       /* check if the nhc module was removed in unlocked part.
+        * TODO: this is a workaround we should prevent unloading
+        * of nhc modules while unlocked part, this will always drop
+        * the lowpan packet but it's very unlikely.
+        *
+        * Solution isn't easy because we need to decide at
+        * lowpan_nhc_check_compression if we do a compression or not.
+        * Because the inline data which is added to skb, we can't move this
+        * handling.
+        */
+       if (unlikely(!nhc || !nhc->compress)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* In the case of RAW sockets the transport header is not set by
+        * the ip6 stack so we must set it ourselves
+        */
+       if (skb->transport_header == skb->network_header)
+               skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
+       ret = nhc->compress(skb, hc_ptr);
+       if (ret < 0)
+               goto out;
+
+       /* skip the transport header */
+       skb_pull(skb, nhc->nexthdrlen);
+
+out:
+       spin_unlock_bh(&lowpan_nhc_lock);
+
+       return ret;
+}
+
+int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
+                               struct ipv6hdr *hdr)
+{
+       struct lowpan_nhc *nhc;
+       int ret;
+
+       spin_lock_bh(&lowpan_nhc_lock);
+
+       nhc = lowpan_nhc_by_nhcid(skb);
+       if (nhc) {
+               if (nhc->uncompress) {
+                       ret = nhc->uncompress(skb, sizeof(struct ipv6hdr) +
+                                             nhc->nexthdrlen);
+                       if (ret < 0) {
+                               spin_unlock_bh(&lowpan_nhc_lock);
+                               return ret;
+                       }
+               } else {
+                       spin_unlock_bh(&lowpan_nhc_lock);
+                       netdev_warn(dev, "received nhc id for %s which is not implemented.\n",
+                                   nhc->name);
+                       return -ENOTSUPP;
+               }
+       } else {
+               spin_unlock_bh(&lowpan_nhc_lock);
+               netdev_warn(dev, "received unknown nhc id which was not found.\n");
+               return -ENOENT;
+       }
+
+       hdr->nexthdr = nhc->nexthdr;
+       skb_reset_transport_header(skb);
+       raw_dump_table(__func__, "raw transport header dump",
+                      skb_transport_header(skb), nhc->nexthdrlen);
+
+       spin_unlock_bh(&lowpan_nhc_lock);
+
+       return 0;
+}
+
+int lowpan_nhc_add(struct lowpan_nhc *nhc)
+{
+       int ret;
+
+       if (!nhc->idlen || !nhc->idsetup)
+               return -EINVAL;
+
+       WARN_ONCE(nhc->idlen > LOWPAN_NHC_MAX_ID_LEN,
+                 "LOWPAN_NHC_MAX_ID_LEN should be updated to %zd.\n",
+                 nhc->idlen);
+
+       nhc->idsetup(nhc);
+
+       spin_lock_bh(&lowpan_nhc_lock);
+
+       if (lowpan_nexthdr_nhcs[nhc->nexthdr]) {
+               ret = -EEXIST;
+               goto out;
+       }
+
+       ret = lowpan_nhc_insert(nhc);
+       if (ret < 0)
+               goto out;
+
+       lowpan_nexthdr_nhcs[nhc->nexthdr] = nhc;
+out:
+       spin_unlock_bh(&lowpan_nhc_lock);
+       return ret;
+}
+EXPORT_SYMBOL(lowpan_nhc_add);
+
+void lowpan_nhc_del(struct lowpan_nhc *nhc)
+{
+       spin_lock_bh(&lowpan_nhc_lock);
+
+       lowpan_nhc_remove(nhc);
+       lowpan_nexthdr_nhcs[nhc->nexthdr] = NULL;
+
+       spin_unlock_bh(&lowpan_nhc_lock);
+
+       synchronize_net();
+}
+EXPORT_SYMBOL(lowpan_nhc_del);
diff --git a/net/6lowpan/nhc.h b/net/6lowpan/nhc.h
new file mode 100644 (file)
index 0000000..ed44938
--- /dev/null
@@ -0,0 +1,146 @@
+#ifndef __6LOWPAN_NHC_H
+#define __6LOWPAN_NHC_H
+
+#include <linux/skbuff.h>
+#include <linux/rbtree.h>
+#include <linux/module.h>
+
+#include <net/6lowpan.h>
+#include <net/ipv6.h>
+
+#define LOWPAN_NHC_MAX_ID_LEN  1
+
+/**
+ * LOWPAN_NHC - helper macro to generate nh id fields and lowpan_nhc struct
+ *
+ * @__nhc: variable name of the lowpan_nhc struct.
+ * @_name: const char * of common header compression name.
+ * @_nexthdr: ipv6 nexthdr field for the header compression.
+ * @_nexthdrlen: ipv6 nexthdr len for the reserved space.
+ * @_idsetup: callback to setup id and mask values.
+ * @_idlen: len for the next header id and mask, should be always the same.
+ * @_uncompress: callback for uncompression call.
+ * @_compress: callback for compression call.
+ */
+#define LOWPAN_NHC(__nhc, _name, _nexthdr,     \
+                  _hdrlen, _idsetup, _idlen,   \
+                  _uncompress, _compress)      \
+static u8 __nhc##_val[_idlen];                 \
+static u8 __nhc##_mask[_idlen];                        \
+static struct lowpan_nhc __nhc = {             \
+       .name           = _name,                \
+       .nexthdr        = _nexthdr,             \
+       .nexthdrlen     = _hdrlen,              \
+       .id             = __nhc##_val,          \
+       .idmask         = __nhc##_mask,         \
+       .idlen          = _idlen,               \
+       .idsetup        = _idsetup,             \
+       .uncompress     = _uncompress,          \
+       .compress       = _compress,            \
+}
+
+#define module_lowpan_nhc(__nhc)               \
+static int __init __nhc##_init(void)           \
+{                                              \
+       return lowpan_nhc_add(&(__nhc));        \
+}                                              \
+module_init(__nhc##_init);                     \
+static void __exit __nhc##_exit(void)          \
+{                                              \
+       lowpan_nhc_del(&(__nhc));               \
+}                                              \
+module_exit(__nhc##_exit);
+
+/**
+ * struct lowpan_nhc - hold 6lowpan next hdr compression ifnformation
+ *
+ * @node: holder for the rbtree.
+ * @name: name of the specific next header compression
+ * @nexthdr: next header value of the protocol which should be compressed.
+ * @nexthdrlen: ipv6 nexthdr len for the reserved space.
+ * @id: array for nhc id. Note this need to be in network byteorder.
+ * @mask: array for nhc id mask. Note this need to be in network byteorder.
+ * @len: the length of the next header id and mask.
+ * @setup: callback to setup fill the next header id value and mask.
+ * @compress: callback to do the header compression.
+ * @uncompress: callback to do the header uncompression.
+ */
+struct lowpan_nhc {
+       struct rb_node  node;
+       const char      *name;
+       const u8        nexthdr;
+       const size_t    nexthdrlen;
+       u8              *id;
+       u8              *idmask;
+       const size_t    idlen;
+
+       void            (*idsetup)(struct lowpan_nhc *nhc);
+       int             (*uncompress)(struct sk_buff *skb, size_t needed);
+       int             (*compress)(struct sk_buff *skb, u8 **hc_ptr);
+};
+
+/**
+ * lowpan_nhc_by_nexthdr - return the 6lowpan nhc by ipv6 nexthdr.
+ *
+ * @nexthdr: ipv6 nexthdr value.
+ */
+struct lowpan_nhc *lowpan_nhc_by_nexthdr(u8 nexthdr);
+
+/**
+ * lowpan_nhc_check_compression - checks if we support compression format. If
+ *     we support the nhc by nexthdr field, the 6LoWPAN iphc NHC bit will be
+ *     set. If we don't support nexthdr will be added as inline data to the
+ *     6LoWPAN header.
+ *
+ * @skb: skb of 6LoWPAN header to read nhc and replace header.
+ * @hdr: ipv6hdr to check the nexthdr value
+ * @hc_ptr: pointer for 6LoWPAN header which should increment at the end of
+ *         replaced header.
+ * @iphc0: iphc0 pointer to set the 6LoWPAN NHC bit
+ */
+int lowpan_nhc_check_compression(struct sk_buff *skb,
+                                const struct ipv6hdr *hdr, u8 **hc_ptr,
+                                u8 *iphc0);
+
+/**
+ * lowpan_nhc_do_compression - calling compress callback for nhc
+ *
+ * @skb: skb of 6LoWPAN header to read nhc and replace header.
+ * @hdr: ipv6hdr to set the nexthdr value
+ * @hc_ptr: pointer for 6LoWPAN header which should increment at the end of
+ *         replaced header.
+ */
+int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
+                             u8 **hc_ptr);
+
+/**
+ * lowpan_nhc_do_uncompression - calling uncompress callback for nhc
+ *
+ * @nhc: 6LoWPAN nhc context, get by lowpan_nhc_by_ functions.
+ * @skb: skb of 6LoWPAN header, skb->data should be pointed to nhc id value.
+ * @dev: netdevice for print logging information.
+ * @hdr: ipv6hdr for setting nexthdr value.
+ */
+int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
+                               struct ipv6hdr *hdr);
+
+/**
+ * lowpan_nhc_add - register a next header compression to framework
+ *
+ * @nhc: nhc which should be add.
+ */
+int lowpan_nhc_add(struct lowpan_nhc *nhc);
+
+/**
+ * lowpan_nhc_del - delete a next header compression from framework
+ *
+ * @nhc: nhc which should be delete.
+ */
+void lowpan_nhc_del(struct lowpan_nhc *nhc);
+
+/**
+ * lowpan_nhc_init - adding all default nhcs
+ */
+void lowpan_nhc_init(void);
+
+#endif /* __6LOWPAN_NHC_H */
diff --git a/net/6lowpan/nhc_dest.c b/net/6lowpan/nhc_dest.c
new file mode 100644 (file)
index 0000000..0b292c9
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ *     6LoWPAN IPv6 Destination Options Header compression according to
+ *     RFC6282
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_DEST_IDLEN  1
+#define LOWPAN_NHC_DEST_ID_0   0xe6
+#define LOWPAN_NHC_DEST_MASK_0 0xfe
+
+static void dest_nhid_setup(struct lowpan_nhc *nhc)
+{
+       nhc->id[0] = LOWPAN_NHC_DEST_ID_0;
+       nhc->idmask[0] = LOWPAN_NHC_DEST_MASK_0;
+}
+
+LOWPAN_NHC(nhc_dest, "RFC6282 Destination Options", NEXTHDR_DEST, 0,
+          dest_nhid_setup, LOWPAN_NHC_DEST_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_dest);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Destination Options compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_fragment.c b/net/6lowpan/nhc_fragment.c
new file mode 100644 (file)
index 0000000..473dbc5
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *     6LoWPAN IPv6 Fragment Header compression according to RFC6282
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_FRAGMENT_IDLEN      1
+#define LOWPAN_NHC_FRAGMENT_ID_0       0xe4
+#define LOWPAN_NHC_FRAGMENT_MASK_0     0xfe
+
+static void fragment_nhid_setup(struct lowpan_nhc *nhc)
+{
+       nhc->id[0] = LOWPAN_NHC_FRAGMENT_ID_0;
+       nhc->idmask[0] = LOWPAN_NHC_FRAGMENT_MASK_0;
+}
+
+LOWPAN_NHC(nhc_fragment, "RFC6282 Fragment", NEXTHDR_FRAGMENT, 0,
+          fragment_nhid_setup, LOWPAN_NHC_FRAGMENT_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_fragment);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Fragment compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_hop.c b/net/6lowpan/nhc_hop.c
new file mode 100644 (file)
index 0000000..1eb66be
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *     6LoWPAN IPv6 Hop-by-Hop Options Header compression according to RFC6282
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_HOP_IDLEN   1
+#define LOWPAN_NHC_HOP_ID_0    0xe0
+#define LOWPAN_NHC_HOP_MASK_0  0xfe
+
+static void hop_nhid_setup(struct lowpan_nhc *nhc)
+{
+       nhc->id[0] = LOWPAN_NHC_HOP_ID_0;
+       nhc->idmask[0] = LOWPAN_NHC_HOP_MASK_0;
+}
+
+LOWPAN_NHC(nhc_hop, "RFC6282 Hop-by-Hop Options", NEXTHDR_HOP, 0,
+          hop_nhid_setup, LOWPAN_NHC_HOP_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_hop);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Hop-by-Hop Options compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_ipv6.c b/net/6lowpan/nhc_ipv6.c
new file mode 100644 (file)
index 0000000..2313d16
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *     6LoWPAN IPv6 Header compression according to RFC6282
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_IPV6_IDLEN  1
+#define LOWPAN_NHC_IPV6_ID_0   0xee
+#define LOWPAN_NHC_IPV6_MASK_0 0xfe
+
+static void ipv6_nhid_setup(struct lowpan_nhc *nhc)
+{
+       nhc->id[0] = LOWPAN_NHC_IPV6_ID_0;
+       nhc->idmask[0] = LOWPAN_NHC_IPV6_MASK_0;
+}
+
+LOWPAN_NHC(nhc_ipv6, "RFC6282 IPv6", NEXTHDR_IPV6, 0, ipv6_nhid_setup,
+          LOWPAN_NHC_IPV6_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_ipv6);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 IPv6 compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_mobility.c b/net/6lowpan/nhc_mobility.c
new file mode 100644 (file)
index 0000000..60d3f38
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *     6LoWPAN IPv6 Mobility Header compression according to RFC6282
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_MOBILITY_IDLEN      1
+#define LOWPAN_NHC_MOBILITY_ID_0       0xe8
+#define LOWPAN_NHC_MOBILITY_MASK_0     0xfe
+
+static void mobility_nhid_setup(struct lowpan_nhc *nhc)
+{
+       nhc->id[0] = LOWPAN_NHC_MOBILITY_ID_0;
+       nhc->idmask[0] = LOWPAN_NHC_MOBILITY_MASK_0;
+}
+
+LOWPAN_NHC(nhc_mobility, "RFC6282 Mobility", NEXTHDR_MOBILITY, 0,
+          mobility_nhid_setup, LOWPAN_NHC_MOBILITY_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_mobility);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Mobility compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_routing.c b/net/6lowpan/nhc_routing.c
new file mode 100644 (file)
index 0000000..c393280
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *     6LoWPAN IPv6 Routing Header compression according to RFC6282
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_ROUTING_IDLEN       1
+#define LOWPAN_NHC_ROUTING_ID_0                0xe2
+#define LOWPAN_NHC_ROUTING_MASK_0      0xfe
+
+static void routing_nhid_setup(struct lowpan_nhc *nhc)
+{
+       nhc->id[0] = LOWPAN_NHC_ROUTING_ID_0;
+       nhc->idmask[0] = LOWPAN_NHC_ROUTING_MASK_0;
+}
+
+LOWPAN_NHC(nhc_routing, "RFC6282 Routing", NEXTHDR_ROUTING, 0,
+          routing_nhid_setup, LOWPAN_NHC_ROUTING_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_routing);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Routing compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_udp.c b/net/6lowpan/nhc_udp.c
new file mode 100644 (file)
index 0000000..c6bcaeb
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ *     6LoWPAN IPv6 UDP compression according to RFC6282
+ *
+ *
+ *     Authors:
+ *     Alexander Aring <aar@pengutronix.de>
+ *
+ *     Original written by:
+ *     Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ *     Jon Smirl <jonsmirl@gmail.com>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_UDP_IDLEN   1
+
+static int udp_uncompress(struct sk_buff *skb, size_t needed)
+{
+       u8 tmp = 0, val = 0;
+       struct udphdr uh;
+       bool fail;
+       int err;
+
+       fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));
+
+       pr_debug("UDP header uncompression\n");
+       switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
+       case LOWPAN_NHC_UDP_CS_P_00:
+               fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
+               fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
+               break;
+       case LOWPAN_NHC_UDP_CS_P_01:
+               fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
+               fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
+               uh.dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
+               break;
+       case LOWPAN_NHC_UDP_CS_P_10:
+               fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
+               uh.source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
+               fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
+               break;
+       case LOWPAN_NHC_UDP_CS_P_11:
+               fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
+               uh.source = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val >> 4));
+               uh.dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val & 0x0f));
+               break;
+       default:
+               BUG();
+       }
+
+       pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
+                ntohs(uh.source), ntohs(uh.dest));
+
+       /* checksum */
+       if (tmp & LOWPAN_NHC_UDP_CS_C) {
+               pr_debug_ratelimited("checksum elided currently not supported\n");
+               fail = true;
+       } else {
+               fail |= lowpan_fetch_skb(skb, &uh.check, sizeof(uh.check));
+       }
+
+       if (fail)
+               return -EINVAL;
+
+       /* UDP length needs to be inferred from the lower layers
+        * here, we obtain the hint from the remaining size of the
+        * frame
+        */
+       uh.len = htons(skb->len + sizeof(struct udphdr));
+       pr_debug("uncompressed UDP length: src = %d", ntohs(uh.len));
+
+       /* replace the compressed UDP head by the uncompressed UDP
+        * header
+        */
+       err = skb_cow(skb, needed);
+       if (unlikely(err))
+               return err;
+
+       skb_push(skb, sizeof(struct udphdr));
+       skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
+
+       return 0;
+}
+
+static int udp_compress(struct sk_buff *skb, u8 **hc_ptr)
+{
+       const struct udphdr *uh = udp_hdr(skb);
+       u8 tmp;
+
+       if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) ==
+            LOWPAN_NHC_UDP_4BIT_PORT) &&
+           ((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) ==
+            LOWPAN_NHC_UDP_4BIT_PORT)) {
+               pr_debug("UDP header: both ports compression to 4 bits\n");
+               /* compression value */
+               tmp = LOWPAN_NHC_UDP_CS_P_11;
+               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+               /* source and destination port */
+               tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT +
+                     ((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4);
+               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+       } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) ==
+                       LOWPAN_NHC_UDP_8BIT_PORT) {
+               pr_debug("UDP header: remove 8 bits of dest\n");
+               /* compression value */
+               tmp = LOWPAN_NHC_UDP_CS_P_01;
+               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+               /* source port */
+               lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
+               /* destination port */
+               tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT;
+               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+       } else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) ==
+                       LOWPAN_NHC_UDP_8BIT_PORT) {
+               pr_debug("UDP header: remove 8 bits of source\n");
+               /* compression value */
+               tmp = LOWPAN_NHC_UDP_CS_P_10;
+               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+               /* source port */
+               tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT;
+               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+               /* destination port */
+               lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
+       } else {
+               pr_debug("UDP header: can't compress\n");
+               /* compression value */
+               tmp = LOWPAN_NHC_UDP_CS_P_00;
+               lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+               /* source port */
+               lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
+               /* destination port */
+               lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
+       }
+
+       /* checksum is always inline */
+       lowpan_push_hc_data(hc_ptr, &uh->check, sizeof(uh->check));
+
+       return 0;
+}
+
+static void udp_nhid_setup(struct lowpan_nhc *nhc)
+{
+       nhc->id[0] = LOWPAN_NHC_UDP_ID;
+       nhc->idmask[0] = LOWPAN_NHC_UDP_MASK;
+}
+
+LOWPAN_NHC(nhc_udp, "RFC6282 UDP", NEXTHDR_UDP, sizeof(struct udphdr),
+          udp_nhid_setup, LOWPAN_NHC_UDP_IDLEN, udp_uncompress, udp_compress);
+
+module_lowpan_nhc(nhc_udp);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 UDP compression");
+MODULE_LICENSE("GPL");
index 7c174b6750cd654c49d7c1d2f621eb9fc3ed1ef0..7b9219022418b2e5a4686b9504f42e32cd091128 100644 (file)
@@ -75,29 +75,8 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev,
        return -hdr_len;
 }
 
-/*
- *     A neighbour discovery of some species (eg arp) has completed. We
- *     can now send the packet.
- */
-
-static int fc_rebuild_header(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
-       struct fch_hdr *fch=(struct fch_hdr *)skb->data;
-       struct fcllc *fcllc=(struct fcllc *)(skb->data+sizeof(struct fch_hdr));
-       if(fcllc->ethertype != htons(ETH_P_IP)) {
-               printk("fc_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(fcllc->ethertype));
-               return 0;
-       }
-       return arp_find(fch->daddr, skb);
-#else
-       return 0;
-#endif
-}
-
 static const struct header_ops fc_header_ops = {
        .create  = fc_header,
-       .rebuild = fc_rebuild_header,
 };
 
 static void fc_setup(struct net_device *dev)
index 59e7346f1193a612f9118b4df85426406aea5549..7d3a0af954e8f7b2eeb1a38dd98a60ce3c230aed 100644 (file)
@@ -87,31 +87,6 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev,
        return -hl;
 }
 
-
-/*
- * Rebuild the FDDI MAC header. This is called after an ARP
- * (or in future other address resolution) has completed on
- * this sk_buff.  We now let ARP fill in the other fields.
- */
-
-static int fddi_rebuild_header(struct sk_buff  *skb)
-{
-       struct fddihdr *fddi = (struct fddihdr *)skb->data;
-
-#ifdef CONFIG_INET
-       if (fddi->hdr.llc_snap.ethertype == htons(ETH_P_IP))
-               /* Try to get ARP to resolve the header and fill destination address */
-               return arp_find(fddi->daddr, skb);
-       else
-#endif
-       {
-               printk("%s: Don't know how to resolve type %04X addresses.\n",
-                      skb->dev->name, ntohs(fddi->hdr.llc_snap.ethertype));
-               return 0;
-       }
-}
-
-
 /*
  * Determine the packet's protocol ID and fill in skb fields.
  * This routine is called before an incoming packet is passed
@@ -177,7 +152,6 @@ EXPORT_SYMBOL(fddi_change_mtu);
 
 static const struct header_ops fddi_header_ops = {
        .create         = fddi_header,
-       .rebuild        = fddi_rebuild_header,
 };
 
 
index 2e03f8259dd55a575f1b147987631cdced3a91a9..ade1a52cdcff8e6a690b38bde8d7af1c140d7d9c 100644 (file)
@@ -90,33 +90,6 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev,
 }
 
 
-/*
- * Rebuild the HIPPI MAC header. This is called after an ARP has
- * completed on this sk_buff. We now let ARP fill in the other fields.
- */
-
-static int hippi_rebuild_header(struct sk_buff *skb)
-{
-       struct hippi_hdr *hip = (struct hippi_hdr *)skb->data;
-
-       /*
-        * Only IP is currently supported
-        */
-
-       if(hip->snap.ethertype != htons(ETH_P_IP))
-       {
-               printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n",skb->dev->name,ntohs(hip->snap.ethertype));
-               return 0;
-       }
-
-       /*
-        * We don't support dynamic ARP on HIPPI, but we use the ARP
-        * static ARP tables to hold the I-FIELDs.
-        */
-       return arp_find(hip->le.daddr, skb);
-}
-
-
 /*
  *     Determine the packet's protocol ID.
  */
@@ -186,7 +159,6 @@ EXPORT_SYMBOL(hippi_neigh_setup_dev);
 
 static const struct header_ops hippi_header_ops = {
        .create         = hippi_header,
-       .rebuild        = hippi_rebuild_header,
 };
 
 
index 64c6bed4a3d3aa4545f596a1a146f1d053db949c..98a30a5b866472b7421f5394636934bc23ec7f94 100644 (file)
@@ -413,7 +413,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                        vlan_transfer_features(dev, vlandev);
                break;
 
-       case NETDEV_DOWN:
+       case NETDEV_DOWN: {
+               struct net_device *tmp;
+               LIST_HEAD(close_list);
+
                if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                        vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
 
@@ -425,11 +428,18 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 
                        vlan = vlan_dev_priv(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
-                               dev_change_flags(vlandev, flgs & ~IFF_UP);
+                               list_add(&vlandev->close_list, &close_list);
+               }
+
+               dev_close_many(&close_list, false);
+
+               list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
                        netif_stacked_transfer_operstate(dev, vlandev);
+                       list_del_init(&vlandev->close_list);
                }
+               list_del(&close_list);
                break;
-
+       }
        case NETDEV_UP:
                /* Put all VLANs for this dev in the up state too.  */
                vlan_group_for_each_dev(grp, i, vlandev) {
index 118956448cf6e4aa9a3609ccf7455a04596e24bf..01d7ba840df8dbf48b07e3c8697bb7c11f424a8d 100644 (file)
 #include <linux/if_vlan.h>
 #include <linux/netpoll.h>
 
-/*
- *     Rebuild the Ethernet MAC header. This is called after an ARP
- *     (or in future other address resolution) has completed on this
- *     sk_buff. We now let ARP fill in the other fields.
- *
- *     This routine CANNOT use cached dst->neigh!
- *     Really, it is used only when dst->neigh is wrong.
- *
- * TODO:  This needs a checkup, I'm ignorant here. --BLG
- */
-static int vlan_dev_rebuild_header(struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-       struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
-
-       switch (veth->h_vlan_encapsulated_proto) {
-#ifdef CONFIG_INET
-       case htons(ETH_P_IP):
-
-               /* TODO:  Confirm this will work with VLAN headers... */
-               return arp_find(veth->h_dest, skb);
-#endif
-       default:
-               pr_debug("%s: unable to resolve type %X addresses\n",
-                        dev->name, ntohs(veth->h_vlan_encapsulated_proto));
-
-               ether_addr_copy(veth->h_source, dev->dev_addr);
-               break;
-       }
-
-       return 0;
-}
-
 /*
  *     Create the VLAN header for an arbitrary protocol layer
  *
@@ -534,7 +501,6 @@ static int vlan_dev_get_lock_subclass(struct net_device *dev)
 
 static const struct header_ops vlan_header_ops = {
        .create  = vlan_dev_hard_header,
-       .rebuild = vlan_dev_rebuild_header,
        .parse   = eth_header_parse,
 };
 
@@ -554,7 +520,6 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev
 
 static const struct header_ops vlan_passthru_header_ops = {
        .create  = vlan_passthru_hard_header,
-       .rebuild = dev_rebuild_header,
        .parse   = eth_header_parse,
 };
 
@@ -573,7 +538,6 @@ static int vlan_dev_init(struct net_device *dev)
        /* IFF_BROADCAST|IFF_MULTICAST; ??? */
        dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
                                          IFF_MASTER | IFF_SLAVE);
-       dev->iflink = real_dev->ifindex;
        dev->state  = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
                                          (1<<__LINK_STATE_DORMANT))) |
                      (1<<__LINK_STATE_PRESENT);
@@ -589,6 +553,7 @@ static int vlan_dev_init(struct net_device *dev)
        if (dev->features & NETIF_F_VLAN_FEATURES)
                netdev_warn(real_dev, "VLAN features are set incorrectly.  Q-in-Q configurations may not work correctly.\n");
 
+       dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;
 
        /* ipv6 shared card related stuff */
        dev->dev_id = real_dev->dev_id;
@@ -767,6 +732,13 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
+static int vlan_dev_get_iflink(const struct net_device *dev)
+{
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+       return real_dev->ifindex;
+}
+
 static const struct ethtool_ops vlan_ethtool_ops = {
        .get_settings           = vlan_ethtool_get_settings,
        .get_drvinfo            = vlan_ethtool_get_drvinfo,
@@ -803,6 +775,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
        .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
+       .ndo_get_iflink         = vlan_dev_get_iflink,
 };
 
 static void vlan_dev_free(struct net_device *dev)
@@ -827,5 +800,5 @@ void vlan_setup(struct net_device *dev)
        dev->destructor         = vlan_dev_free;
        dev->ethtool_ops        = &vlan_ethtool_ops;
 
-       memset(dev->broadcast, 0, ETH_ALEN);
+       eth_zero_addr(dev->broadcast);
 }
index e86a9bea1d160ccc1a739eee576d0bdbf0483956..6f4c4c88db84ecb084ca5553113d9ad2963e4985 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/uaccess.h>
+#include <linux/uio.h>
 #include <net/9p/9p.h>
 #include <linux/parser.h>
 #include <net/9p/client.h>
@@ -555,7 +556,7 @@ out_err:
  */
 
 static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
-                             char *uidata, int in_hdrlen, int kern_buf)
+                             struct iov_iter *uidata, int in_hdrlen)
 {
        int err;
        int ecode;
@@ -591,16 +592,11 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
                ename = &req->rc->sdata[req->rc->offset];
                if (len > inline_len) {
                        /* We have error in external buffer */
-                       if (kern_buf) {
-                               memcpy(ename + inline_len, uidata,
-                                      len - inline_len);
-                       } else {
-                               err = copy_from_user(ename + inline_len,
-                                                    uidata, len - inline_len);
-                               if (err) {
-                                       err = -EFAULT;
-                                       goto out_err;
-                               }
+                       err = copy_from_iter(ename + inline_len,
+                                            len - inline_len, uidata);
+                       if (err != len - inline_len) {
+                               err = -EFAULT;
+                               goto out_err;
                        }
                }
                ename = NULL;
@@ -806,8 +802,8 @@ reterr:
  * p9_client_zc_rpc - issue a request and wait for a response
  * @c: client session
  * @type: type of request
- * @uidata: user bffer that should be ued for zero copy read
- * @uodata: user buffer that shoud be user for zero copy write
+ * @uidata: destination for zero copy read
+ * @uodata: source for zero copy write
  * @inlen: read buffer size
  * @olen: write buffer size
  * @hdrlen: reader header size, This is the size of response protocol data
@@ -816,9 +812,10 @@ reterr:
  * Returns request structure (which client must free using p9_free_req)
  */
 static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
-                                        char *uidata, char *uodata,
+                                        struct iov_iter *uidata,
+                                        struct iov_iter *uodata,
                                         int inlen, int olen, int in_hdrlen,
-                                        int kern_buf, const char *fmt, ...)
+                                        const char *fmt, ...)
 {
        va_list ap;
        int sigpending, err;
@@ -841,12 +838,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
        } else
                sigpending = 0;
 
-       /* If we are called with KERNEL_DS force kern_buf */
-       if (segment_eq(get_fs(), KERNEL_DS))
-               kern_buf = 1;
-
        err = c->trans_mod->zc_request(c, req, uidata, uodata,
-                                      inlen, olen, in_hdrlen, kern_buf);
+                                      inlen, olen, in_hdrlen);
        if (err < 0) {
                if (err == -EIO)
                        c->status = Disconnected;
@@ -876,7 +869,7 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
        if (err < 0)
                goto reterr;
 
-       err = p9_check_zc_errors(c, req, uidata, in_hdrlen, kern_buf);
+       err = p9_check_zc_errors(c, req, uidata, in_hdrlen);
        trace_9p_client_res(c, type, req->rc->tag, err);
        if (!err)
                return req;
@@ -1123,6 +1116,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
                fid = NULL;
                goto error;
        }
+       fid->uid = n_uname;
 
        req = p9_client_rpc(clnt, P9_TATTACH, "ddss?u", fid->fid,
                        afid ? afid->fid : P9_NOFID, uname, aname, n_uname);
@@ -1541,142 +1535,128 @@ error:
 EXPORT_SYMBOL(p9_client_unlinkat);
 
 int
-p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
-                                                               u32 count)
+p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
 {
-       char *dataptr;
-       int kernel_buf = 0;
+       struct p9_client *clnt = fid->clnt;
        struct p9_req_t *req;
-       struct p9_client *clnt;
-       int err, rsize, non_zc = 0;
-
+       int total = 0;
 
        p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
-                  fid->fid, (unsigned long long) offset, count);
-       err = 0;
-       clnt = fid->clnt;
-
-       rsize = fid->iounit;
-       if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
-               rsize = clnt->msize - P9_IOHDRSZ;
-
-       if (count < rsize)
-               rsize = count;
-
-       /* Don't bother zerocopy for small IO (< 1024) */
-       if (clnt->trans_mod->zc_request && rsize > 1024) {
-               char *indata;
-               if (data) {
-                       kernel_buf = 1;
-                       indata = data;
-               } else
-                       indata = (__force char *)udata;
-               /*
-                * response header len is 11
-                * PDU Header(7) + IO Size (4)
-                */
-               req = p9_client_zc_rpc(clnt, P9_TREAD, indata, NULL, rsize, 0,
-                                      11, kernel_buf, "dqd", fid->fid,
-                                      offset, rsize);
-       } else {
-               non_zc = 1;
-               req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
-                                   rsize);
-       }
-       if (IS_ERR(req)) {
-               err = PTR_ERR(req);
-               goto error;
-       }
+                  fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
+
+       while (iov_iter_count(to)) {
+               int count = iov_iter_count(to);
+               int rsize, non_zc = 0;
+               char *dataptr;
+                       
+               rsize = fid->iounit;
+               if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
+                       rsize = clnt->msize - P9_IOHDRSZ;
+
+               if (count < rsize)
+                       rsize = count;
+
+               /* Don't bother zerocopy for small IO (< 1024) */
+               if (clnt->trans_mod->zc_request && rsize > 1024) {
+                       /*
+                        * response header len is 11
+                        * PDU Header(7) + IO Size (4)
+                        */
+                       req = p9_client_zc_rpc(clnt, P9_TREAD, to, NULL, rsize,
+                                              0, 11, "dqd", fid->fid,
+                                              offset, rsize);
+               } else {
+                       non_zc = 1;
+                       req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
+                                           rsize);
+               }
+               if (IS_ERR(req)) {
+                       *err = PTR_ERR(req);
+                       break;
+               }
 
-       err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
-       if (err) {
-               trace_9p_protocol_dump(clnt, req->rc);
-               goto free_and_error;
-       }
+               *err = p9pdu_readf(req->rc, clnt->proto_version,
+                                  "D", &count, &dataptr);
+               if (*err) {
+                       trace_9p_protocol_dump(clnt, req->rc);
+                       p9_free_req(clnt, req);
+                       break;
+               }
 
-       p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+               p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+               if (!count) {
+                       p9_free_req(clnt, req);
+                       break;
+               }
 
-       if (non_zc) {
-               if (data) {
-                       memmove(data, dataptr, count);
-               } else {
-                       err = copy_to_user(udata, dataptr, count);
-                       if (err) {
-                               err = -EFAULT;
-                               goto free_and_error;
+               if (non_zc) {
+                       int n = copy_to_iter(dataptr, count, to);
+                       total += n;
+                       offset += n;
+                       if (n != count) {
+                               *err = -EFAULT;
+                               p9_free_req(clnt, req);
+                               break;
                        }
+               } else {
+                       iov_iter_advance(to, count);
+                       total += count;
+                       offset += count;
                }
+               p9_free_req(clnt, req);
        }
-       p9_free_req(clnt, req);
-       return count;
-
-free_and_error:
-       p9_free_req(clnt, req);
-error:
-       return err;
+       return total;
 }
 EXPORT_SYMBOL(p9_client_read);
 
 int
-p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
-                                                       u64 offset, u32 count)
+p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
 {
-       int err, rsize;
-       int kernel_buf = 0;
-       struct p9_client *clnt;
+       struct p9_client *clnt = fid->clnt;
        struct p9_req_t *req;
+       int total = 0;
+
+       p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
+                               fid->fid, (unsigned long long) offset,
+                               iov_iter_count(from));
+
+       while (iov_iter_count(from)) {
+               int count = iov_iter_count(from);
+               int rsize = fid->iounit;
+               if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
+                       rsize = clnt->msize - P9_IOHDRSZ;
+
+               if (count < rsize)
+                       rsize = count;
+
+               /* Don't bother zerocopy for small IO (< 1024) */
+               if (clnt->trans_mod->zc_request && rsize > 1024) {
+                       req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0,
+                                              rsize, P9_ZC_HDR_SZ, "dqd",
+                                              fid->fid, offset, rsize);
+               } else {
+                       req = p9_client_rpc(clnt, P9_TWRITE, "dqV", fid->fid,
+                                                   offset, rsize, from);
+               }
+               if (IS_ERR(req)) {
+                       *err = PTR_ERR(req);
+                       break;
+               }
 
-       p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
-                               fid->fid, (unsigned long long) offset, count);
-       err = 0;
-       clnt = fid->clnt;
-
-       rsize = fid->iounit;
-       if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
-               rsize = clnt->msize - P9_IOHDRSZ;
+               *err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
+               if (*err) {
+                       trace_9p_protocol_dump(clnt, req->rc);
+                       p9_free_req(clnt, req);
+               }
 
-       if (count < rsize)
-               rsize = count;
+               p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
 
-       /* Don't bother zerocopy for small IO (< 1024) */
-       if (clnt->trans_mod->zc_request && rsize > 1024) {
-               char *odata;
-               if (data) {
-                       kernel_buf = 1;
-                       odata = data;
-               } else
-                       odata = (char *)udata;
-               req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
-                                      P9_ZC_HDR_SZ, kernel_buf, "dqd",
-                                      fid->fid, offset, rsize);
-       } else {
-               if (data)
-                       req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid,
-                                           offset, rsize, data);
-               else
-                       req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid,
-                                           offset, rsize, udata);
-       }
-       if (IS_ERR(req)) {
-               err = PTR_ERR(req);
-               goto error;
-       }
-
-       err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
-       if (err) {
-               trace_9p_protocol_dump(clnt, req->rc);
-               goto free_and_error;
+               p9_free_req(clnt, req);
+               iov_iter_advance(from, count);
+               total += count;
+               offset += count;
        }
-
-       p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
-
-       p9_free_req(clnt, req);
-       return count;
-
-free_and_error:
-       p9_free_req(clnt, req);
-error:
-       return err;
+       return total;
 }
 EXPORT_SYMBOL(p9_client_write);
 
@@ -2068,6 +2048,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
        struct p9_client *clnt;
        struct p9_req_t *req;
        char *dataptr;
+       struct kvec kv = {.iov_base = data, .iov_len = count};
+       struct iov_iter to;
+
+       iov_iter_kvec(&to, READ | ITER_KVEC, &kv, 1, count);
 
        p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
                                fid->fid, (unsigned long long) offset, count);
@@ -2088,8 +2072,8 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
                 * response header len is 11
                 * PDU Header(7) + IO Size (4)
                 */
-               req = p9_client_zc_rpc(clnt, P9_TREADDIR, data, NULL, rsize, 0,
-                                      11, 1, "dqd", fid->fid, offset, rsize);
+               req = p9_client_zc_rpc(clnt, P9_TREADDIR, &to, NULL, rsize, 0,
+                                      11, "dqd", fid->fid, offset, rsize);
        } else {
                non_zc = 1;
                req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,
index ab9127ec5b7a6881e7dd2116e49819186675562f..e9d0f0c1a04827f0d1cc0f554f6b4dfaabb2c414 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/sched.h>
 #include <linux/stddef.h>
 #include <linux/types.h>
+#include <linux/uio.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
 #include "protocol.h"
@@ -69,10 +70,11 @@ static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
 }
 
 static size_t
-pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
+pdu_write_u(struct p9_fcall *pdu, struct iov_iter *from, size_t size)
 {
        size_t len = min(pdu->capacity - pdu->size, size);
-       if (copy_from_user(&pdu->sdata[pdu->size], udata, len))
+       struct iov_iter i = *from;
+       if (copy_from_iter(&pdu->sdata[pdu->size], len, &i) != len)
                len = 0;
 
        pdu->size += len;
@@ -437,23 +439,13 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                                                 stbuf->extension, stbuf->n_uid,
                                                 stbuf->n_gid, stbuf->n_muid);
                        } break;
-               case 'D':{
-                               uint32_t count = va_arg(ap, uint32_t);
-                               const void *data = va_arg(ap, const void *);
-
-                               errcode = p9pdu_writef(pdu, proto_version, "d",
-                                                                       count);
-                               if (!errcode && pdu_write(pdu, data, count))
-                                       errcode = -EFAULT;
-                       }
-                       break;
-               case 'U':{
+               case 'V':{
                                int32_t count = va_arg(ap, int32_t);
-                               const char __user *udata =
-                                               va_arg(ap, const void __user *);
+                               struct iov_iter *from =
+                                               va_arg(ap, struct iov_iter *);
                                errcode = p9pdu_writef(pdu, proto_version, "d",
                                                                        count);
-                               if (!errcode && pdu_write_u(pdu, udata, count))
+                               if (!errcode && pdu_write_u(pdu, from, count))
                                        errcode = -EFAULT;
                        }
                        break;
index 2ee3879161b1769f456501044a08c8ebfda420a0..38aa6345bdfa2ec9fc52155e5b0eb3b369fdf463 100644 (file)
  *
  */
 
-#include <linux/slab.h>
+#include <linux/mm.h>
 #include <linux/module.h>
-#include <net/9p/9p.h>
-#include <net/9p/client.h>
-#include <linux/scatterlist.h>
-#include "trans_common.h"
 
 /**
  *  p9_release_req_pages - Release pages after the transaction.
@@ -31,39 +27,3 @@ void p9_release_pages(struct page **pages, int nr_pages)
                        put_page(pages[i]);
 }
 EXPORT_SYMBOL(p9_release_pages);
-
-/**
- * p9_nr_pages - Return number of pages needed to accommodate the payload.
- */
-int p9_nr_pages(char *data, int len)
-{
-       unsigned long start_page, end_page;
-       start_page =  (unsigned long)data >> PAGE_SHIFT;
-       end_page = ((unsigned long)data + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       return end_page - start_page;
-}
-EXPORT_SYMBOL(p9_nr_pages);
-
-/**
- * payload_gup - Translates user buffer into kernel pages and
- * pins them either for read/write through get_user_pages_fast().
- * @req: Request to be sent to server.
- * @pdata_off: data offset into the first page after translation (gup).
- * @pdata_len: Total length of the IO. gup may not return requested # of pages.
- * @nr_pages: number of pages to accommodate the payload
- * @rw: Indicates if the pages are for read or write.
- */
-
-int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write)
-{
-       int nr_mapped_pages;
-
-       nr_mapped_pages = get_user_pages_fast((unsigned long)data,
-                                             *nr_pages, write, pages);
-       if (nr_mapped_pages <= 0)
-               return nr_mapped_pages;
-
-       *nr_pages = nr_mapped_pages;
-       return 0;
-}
-EXPORT_SYMBOL(p9_payload_gup);
index 173bb550a9eb163df30c3b46b8770d0a37c84aa3..c43babb3f6354ea15bdf125352b6de83fb670af1 100644 (file)
@@ -13,5 +13,3 @@
  */
 
 void p9_release_pages(struct page **, int);
-int p9_payload_gup(char *, int *, struct page **, int);
-int p9_nr_pages(char *, int);
index 80d08f6664cbb5611eef291b959facd5332a61ef..3e3d82d8ff70506c2878d4b55bf44269ed26b5cb 100644 (file)
@@ -940,7 +940,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
        sin_server.sin_family = AF_INET;
        sin_server.sin_addr.s_addr = in_aton(addr);
        sin_server.sin_port = htons(opts.port);
-       err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
+       err = __sock_create(current->nsproxy->net_ns, PF_INET,
                            SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
        if (err) {
                pr_err("%s (%d): problem creating socket\n",
@@ -988,7 +988,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
 
        sun_server.sun_family = PF_UNIX;
        strcpy(sun_server.sun_path, addr);
-       err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
+       err = __sock_create(current->nsproxy->net_ns, PF_UNIX,
                            SOCK_STREAM, 0, &csocket, 1);
        if (err < 0) {
                pr_err("%s (%d): problem creating socket\n",
index 36a1a739ad68ff57eace5ba4bc4166faf12c485b..e62bcbbabb5e3cd43717f7980fce3e280aba3ded 100644 (file)
@@ -217,15 +217,15 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
  * @start: which segment of the sg_list to start at
  * @pdata: a list of pages to add into sg.
  * @nr_pages: number of pages to pack into the scatter/gather list
- * @data: data to pack into scatter/gather list
+ * @offs: amount of data in the beginning of first page _not_ to pack
  * @count: amount of data to pack into the scatter/gather list
  */
 static int
 pack_sg_list_p(struct scatterlist *sg, int start, int limit,
-              struct page **pdata, int nr_pages, char *data, int count)
+              struct page **pdata, int nr_pages, size_t offs, int count)
 {
        int i = 0, s;
-       int data_off;
+       int data_off = offs;
        int index = start;
 
        BUG_ON(nr_pages > (limit - start));
@@ -233,16 +233,14 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
         * if the first page doesn't start at
         * page boundary find the offset
         */
-       data_off = offset_in_page(data);
        while (nr_pages) {
-               s = rest_of_page(data);
+               s = PAGE_SIZE - data_off;
                if (s > count)
                        s = count;
                /* Make sure we don't terminate early. */
                sg_unmark_end(&sg[index]);
                sg_set_page(&sg[index++], pdata[i++], s, data_off);
                data_off = 0;
-               data += s;
                count -= s;
                nr_pages--;
        }
@@ -314,11 +312,20 @@ req_retry:
 }
 
 static int p9_get_mapped_pages(struct virtio_chan *chan,
-                              struct page **pages, char *data,
-                              int nr_pages, int write, int kern_buf)
+                              struct page ***pages,
+                              struct iov_iter *data,
+                              int count,
+                              size_t *offs,
+                              int *need_drop)
 {
+       int nr_pages;
        int err;
-       if (!kern_buf) {
+
+       if (!iov_iter_count(data))
+               return 0;
+
+       if (!(data->type & ITER_KVEC)) {
+               int n;
                /*
                 * We allow only p9_max_pages pinned. We wait for the
                 * Other zc request to finish here
@@ -329,26 +336,49 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
                        if (err == -ERESTARTSYS)
                                return err;
                }
-               err = p9_payload_gup(data, &nr_pages, pages, write);
-               if (err < 0)
-                       return err;
+               n = iov_iter_get_pages_alloc(data, pages, count, offs);
+               if (n < 0)
+                       return n;
+               *need_drop = 1;
+               nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
                atomic_add(nr_pages, &vp_pinned);
+               return n;
        } else {
                /* kernel buffer, no need to pin pages */
-               int s, index = 0;
-               int count = nr_pages;
-               while (nr_pages) {
-                       s = rest_of_page(data);
-                       if (is_vmalloc_addr(data))
-                               pages[index++] = vmalloc_to_page(data);
+               int index;
+               size_t len;
+               void *p;
+
+               /* we'd already checked that it's non-empty */
+               while (1) {
+                       len = iov_iter_single_seg_count(data);
+                       if (likely(len)) {
+                               p = data->kvec->iov_base + data->iov_offset;
+                               break;
+                       }
+                       iov_iter_advance(data, 0);
+               }
+               if (len > count)
+                       len = count;
+
+               nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
+                          (unsigned long)p / PAGE_SIZE;
+
+               *pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+               if (!*pages)
+                       return -ENOMEM;
+
+               *need_drop = 0;
+               p -= (*offs = (unsigned long)p % PAGE_SIZE);
+               for (index = 0; index < nr_pages; index++) {
+                       if (is_vmalloc_addr(p))
+                               (*pages)[index] = vmalloc_to_page(p);
                        else
-                               pages[index++] = kmap_to_page(data);
-                       data += s;
-                       nr_pages--;
+                               (*pages)[index] = kmap_to_page(p);
+                       p += PAGE_SIZE;
                }
-               nr_pages = count;
+               return len;
        }
-       return nr_pages;
 }
 
 /**
@@ -364,8 +394,8 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
  */
 static int
 p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
-                    char *uidata, char *uodata, int inlen,
-                    int outlen, int in_hdr_len, int kern_buf)
+                    struct iov_iter *uidata, struct iov_iter *uodata,
+                    int inlen, int outlen, int in_hdr_len)
 {
        int in, out, err, out_sgs, in_sgs;
        unsigned long flags;
@@ -373,41 +403,32 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
        struct page **in_pages = NULL, **out_pages = NULL;
        struct virtio_chan *chan = client->trans;
        struct scatterlist *sgs[4];
+       size_t offs;
+       int need_drop = 0;
 
        p9_debug(P9_DEBUG_TRANS, "virtio request\n");
 
        if (uodata) {
-               out_nr_pages = p9_nr_pages(uodata, outlen);
-               out_pages = kmalloc(sizeof(struct page *) * out_nr_pages,
-                                   GFP_NOFS);
-               if (!out_pages) {
-                       err = -ENOMEM;
-                       goto err_out;
-               }
-               out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata,
-                                                  out_nr_pages, 0, kern_buf);
-               if (out_nr_pages < 0) {
-                       err = out_nr_pages;
-                       kfree(out_pages);
-                       out_pages = NULL;
-                       goto err_out;
+               int n = p9_get_mapped_pages(chan, &out_pages, uodata,
+                                           outlen, &offs, &need_drop);
+               if (n < 0)
+                       return n;
+               out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
+               if (n != outlen) {
+                       __le32 v = cpu_to_le32(n);
+                       memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+                       outlen = n;
                }
-       }
-       if (uidata) {
-               in_nr_pages = p9_nr_pages(uidata, inlen);
-               in_pages = kmalloc(sizeof(struct page *) * in_nr_pages,
-                                  GFP_NOFS);
-               if (!in_pages) {
-                       err = -ENOMEM;
-                       goto err_out;
-               }
-               in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata,
-                                                 in_nr_pages, 1, kern_buf);
-               if (in_nr_pages < 0) {
-                       err = in_nr_pages;
-                       kfree(in_pages);
-                       in_pages = NULL;
-                       goto err_out;
+       } else if (uidata) {
+               int n = p9_get_mapped_pages(chan, &in_pages, uidata,
+                                           inlen, &offs, &need_drop);
+               if (n < 0)
+                       return n;
+               in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
+               if (n != inlen) {
+                       __le32 v = cpu_to_le32(n);
+                       memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+                       inlen = n;
                }
        }
        req->status = REQ_STATUS_SENT;
@@ -426,7 +447,7 @@ req_retry_pinned:
        if (out_pages) {
                sgs[out_sgs++] = chan->sg + out;
                out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
-                                     out_pages, out_nr_pages, uodata, outlen);
+                                     out_pages, out_nr_pages, offs, outlen);
        }
                
        /*
@@ -444,7 +465,7 @@ req_retry_pinned:
        if (in_pages) {
                sgs[out_sgs + in_sgs++] = chan->sg + out + in;
                in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
-                                    in_pages, in_nr_pages, uidata, inlen);
+                                    in_pages, in_nr_pages, offs, inlen);
        }
 
        BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
@@ -478,7 +499,7 @@ req_retry_pinned:
         * Non kernel buffers are pinned, unpin them
         */
 err_out:
-       if (!kern_buf) {
+       if (need_drop) {
                if (in_pages) {
                        p9_release_pages(in_pages, in_nr_pages);
                        atomic_sub(in_nr_pages, &vp_pinned);
index 38704bdf941ad6697db492959a909ca716ea9d4c..3995613e5510cfd6a05ebfe5cbbd9da3bcb5589f 100644 (file)
@@ -69,7 +69,7 @@ obj-$(CONFIG_BATMAN_ADV)      += batman-adv/
 obj-$(CONFIG_NFC)              += nfc/
 obj-$(CONFIG_OPENVSWITCH)      += openvswitch/
 obj-$(CONFIG_VSOCKETS) += vmw_vsock/
-obj-$(CONFIG_NET_MPLS_GSO)     += mpls/
+obj-$(CONFIG_MPLS)             += mpls/
 obj-$(CONFIG_HSR)              += hsr/
 ifneq ($(CONFIG_NET_SWITCHDEV),)
 obj-y                          += switchdev/
index d1c55d8dd0a2538eaabe403ceeb26896c00adf66..8ad3ec2610b6499b92b2f3bc97ac02d2d043dd45 100644 (file)
@@ -141,7 +141,7 @@ static void __aarp_send_query(struct aarp_entry *a)
        eah->pa_src_net  = sat->s_net;
        eah->pa_src_node = sat->s_node;
 
-       memset(eah->hw_dst, '\0', ETH_ALEN);
+       eth_zero_addr(eah->hw_dst);
 
        eah->pa_dst_zero = 0;
        eah->pa_dst_net  = a->target_addr.s_net;
@@ -189,7 +189,7 @@ static void aarp_send_reply(struct net_device *dev, struct atalk_addr *us,
        eah->pa_src_node = us->s_node;
 
        if (!sha)
-               memset(eah->hw_dst, '\0', ETH_ALEN);
+               eth_zero_addr(eah->hw_dst);
        else
                ether_addr_copy(eah->hw_dst, sha);
 
@@ -239,7 +239,7 @@ static void aarp_send_probe(struct net_device *dev, struct atalk_addr *us)
        eah->pa_src_net  = us->s_net;
        eah->pa_src_node = us->s_node;
 
-       memset(eah->hw_dst, '\0', ETH_ALEN);
+       eth_zero_addr(eah->hw_dst);
 
        eah->pa_dst_zero = 0;
        eah->pa_dst_net  = us->s_net;
index 0d0766ea5ab104c5bba47f3b8ca32b1858e9c94d..3b7ad43c7dad948d192ace962f3a4d4422c47319 100644 (file)
@@ -1559,8 +1559,7 @@ freeit:
        return 0;
 }
 
-static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-                        size_t len)
+static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct atalk_sock *at = at_sk(sk);
@@ -1728,8 +1727,8 @@ out:
        return err ? : len;
 }
 
-static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-                        size_t size, int flags)
+static int atalk_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                        int flags)
 {
        struct sock *sk = sock->sk;
        struct ddpehdr *ddp;
index b84057e41bd6a364281ea85f24e60b82e4ec6cfd..ed0466637e13326be6796e81cdd182d4096a1509 100644 (file)
@@ -523,8 +523,8 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
        return 0;
 }
 
-int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-               size_t size, int flags)
+int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+               int flags)
 {
        struct sock *sk = sock->sk;
        struct atm_vcc *vcc;
@@ -569,8 +569,7 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
        return copied;
 }
 
-int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
-               size_t size)
+int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
 {
        struct sock *sk = sock->sk;
        DEFINE_WAIT(wait);
index cc3c2dae4d793427259e614acfec579d42f0c2a1..4d6f5b2068ac704a7bbbc720b0d99174a7d523b6 100644 (file)
 int vcc_create(struct net *net, struct socket *sock, int protocol, int family);
 int vcc_release(struct socket *sock);
 int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
-int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-               size_t size, int flags);
-int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
-               size_t total_len);
+int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+               int flags);
+int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len);
 unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
 int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
index 4b98f897044aa6a364392bc1ec5b68a2a672a2d2..cd3b37989057fd0b1c5a8b1f49a224fe96d7ba87 100644 (file)
@@ -2001,7 +2001,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
                if (entry == NULL)
                        goto out;
                memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
-               memset(entry->mac_addr, 0, ETH_ALEN);
+               eth_zero_addr(entry->mac_addr);
                entry->recv_vcc = vcc;
                entry->old_recv_push = old_push;
                entry->status = ESI_UNKNOWN;
@@ -2086,7 +2086,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
        entry->vcc = vcc;
        entry->old_push = old_push;
        memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
-       memset(entry->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(entry->mac_addr);
        entry->status = ESI_UNKNOWN;
        hlist_add_head(&entry->next, &priv->lec_arp_empty_ones);
        entry->timer.expires = jiffies + priv->vcc_timeout_period;
index 523bce72f698ef2a34cfc66da4dbe69cde954fe9..4fd6af47383a014b72b3377fc1556c6bff18e304 100644 (file)
 #include "resources.h"
 #include "signaling.h"
 
-#undef WAIT_FOR_DEMON          /* #define this if system calls on SVC sockets
-                                  should block until the demon runs.
-                                  Danger: may cause nasty hangs if the demon
-                                  crashes. */
-
 struct atm_vcc *sigd = NULL;
-#ifdef WAIT_FOR_DEMON
-static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep);
-#endif
 
 static void sigd_put_skb(struct sk_buff *skb)
 {
-#ifdef WAIT_FOR_DEMON
-       DECLARE_WAITQUEUE(wait, current);
-
-       add_wait_queue(&sigd_sleep, &wait);
-       while (!sigd) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               pr_debug("atmsvc: waiting for signaling daemon...\n");
-               schedule();
-       }
-       current->state = TASK_RUNNING;
-       remove_wait_queue(&sigd_sleep, &wait);
-#else
        if (!sigd) {
                pr_debug("atmsvc: no signaling daemon\n");
                kfree_skb(skb);
                return;
        }
-#endif
        atm_force_charge(sigd, skb->truesize);
        skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
        sk_atm(sigd)->sk_data_ready(sk_atm(sigd));
@@ -261,8 +240,5 @@ int sigd_attach(struct atm_vcc *vcc)
        vcc_insert_socket(sk_atm(vcc));
        set_bit(ATM_VF_META, &vcc->flags);
        set_bit(ATM_VF_READY, &vcc->flags);
-#ifdef WAIT_FOR_DEMON
-       wake_up(&sigd_sleep);
-#endif
        return 0;
 }
index ca049a7c9287d703f789b5842472a768152dd7ca..330c1f4a5a0b6edfca55bece27b38d1e30482d55 100644 (file)
@@ -1432,8 +1432,7 @@ out:
        return err;
 }
 
-static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *msg, size_t len)
+static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        DECLARE_SOCKADDR(struct sockaddr_ax25 *, usax, msg->msg_name);
        struct sock *sk = sock->sk;
@@ -1599,8 +1598,8 @@ out:
        return err;
 }
 
-static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
-       struct msghdr *msg, size_t size, int flags)
+static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                       int flags)
 {
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
index 67de6b33f2c309ad4426b2075b1bd16d8ce27e11..7c646bb2c6f70246e83c8701f8771b716f8194d7 100644 (file)
@@ -46,9 +46,9 @@
 
 #ifdef CONFIG_INET
 
-int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
-                    unsigned short type, const void *daddr,
-                    const void *saddr, unsigned int len)
+static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
+                           unsigned short type, const void *daddr,
+                           const void *saddr, unsigned int len)
 {
        unsigned char *buff;
 
@@ -100,7 +100,7 @@ int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
        return -AX25_HEADER_LEN;        /* Unfinished header */
 }
 
-int ax25_rebuild_header(struct sk_buff *skb)
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
 {
        struct sk_buff *ourskb;
        unsigned char *bp  = skb->data;
@@ -115,9 +115,6 @@ int ax25_rebuild_header(struct sk_buff *skb)
        dst = (ax25_address *)(bp + 1);
        src = (ax25_address *)(bp + 8);
 
-       if (arp_find(bp + 1, skb))
-               return 1;
-
        route = ax25_get_route(dst, NULL);
        if (route) {
                digipeat = route->digipeat;
@@ -129,6 +126,7 @@ int ax25_rebuild_header(struct sk_buff *skb)
                dev = skb->dev;
 
        if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
+               kfree_skb(skb);
                goto put;
        }
 
@@ -212,31 +210,29 @@ put:
        if (route)
                ax25_put_route(route);
 
-       return 1;
+       return NETDEV_TX_OK;
 }
 
 #else  /* INET */
 
-int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
-                    unsigned short type, const void *daddr,
-                    const void *saddr, unsigned int len)
+static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
+                           unsigned short type, const void *daddr,
+                           const void *saddr, unsigned int len)
 {
        return -AX25_HEADER_LEN;
 }
 
-int ax25_rebuild_header(struct sk_buff *skb)
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
 {
-       return 1;
+       kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
-
 #endif
 
 const struct header_ops ax25_header_ops = {
        .create = ax25_hard_header,
-       .rebuild = ax25_rebuild_header,
 };
 
-EXPORT_SYMBOL(ax25_hard_header);
-EXPORT_SYMBOL(ax25_rebuild_header);
 EXPORT_SYMBOL(ax25_header_ops);
+EXPORT_SYMBOL(ax25_ip_xmit);
 
index 27649e85f3f666b6131ae4becf367ce3a7b108d8..090828cf1fa7a5999a0dcec4f98bbf227b54d078 100644 (file)
@@ -592,15 +592,16 @@ static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
 
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-       ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
-                        (curr_gw == gw_node ? "=>" : "  "),
-                        gw_node->orig_node->orig,
-                        router_ifinfo->bat_iv.tq_avg, router->addr,
-                        router->if_incoming->net_dev->name,
-                        gw_node->bandwidth_down / 10,
-                        gw_node->bandwidth_down % 10,
-                        gw_node->bandwidth_up / 10,
-                        gw_node->bandwidth_up % 10);
+       seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
+                  (curr_gw == gw_node ? "=>" : "  "),
+                  gw_node->orig_node->orig,
+                  router_ifinfo->bat_iv.tq_avg, router->addr,
+                  router->if_incoming->net_dev->name,
+                  gw_node->bandwidth_down / 10,
+                  gw_node->bandwidth_down % 10,
+                  gw_node->bandwidth_up / 10,
+                  gw_node->bandwidth_up % 10);
+       ret = seq_has_overflowed(seq) ? -1 : 0;
 
        if (curr_gw)
                batadv_gw_node_free_ref(curr_gw);
index fbda6b54baffccf798375cb8add49bb179738386..baf1f9843f2c42a78c7df31b0c60b3012d7fe22d 100644 (file)
@@ -83,11 +83,12 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
                return true;
 
        /* no more parents..stop recursion */
-       if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
+       if (dev_get_iflink(net_dev) == 0 ||
+           dev_get_iflink(net_dev) == net_dev->ifindex)
                return false;
 
        /* recurse over the parent device */
-       parent_dev = __dev_get_by_index(&init_net, net_dev->iflink);
+       parent_dev = __dev_get_by_index(&init_net, dev_get_iflink(net_dev));
        /* if we got a NULL parent_dev there is something broken.. */
        if (WARN(!parent_dev, "Cannot find parent device"))
                return false;
index 7de74635a110cbbecbaaddf31ac5d39708362397..b8c794b87523857b9a658526ebb92dd21b22dd57 100644 (file)
@@ -91,4 +91,12 @@ config BT_SELFTEST_SMP
          Run test cases for SMP cryptographic functionality, including both
          legacy SMP as well as the Secure Connections features.
 
+config BT_DEBUGFS
+       bool "Export Bluetooth internals in debugfs"
+       depends on BT && DEBUG_FS
+       default y
+       help
+         Provide extensive information about internal Bluetooth states
+         in debugfs.
+
 source "drivers/bluetooth/Kconfig"
index 8e96e30722668a8fa92663c1ee1ba17f2f74abd1..9a8ea232d28ff3364c9f0b6917489524fdb93dab 100644 (file)
@@ -13,8 +13,9 @@ bluetooth_6lowpan-y := 6lowpan.o
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
        hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
-       a2mp.o amp.o ecc.o hci_request.o hci_debugfs.o
+       a2mp.o amp.o ecc.o hci_request.o mgmt_util.o
 
+bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
 bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
 
 subdir-ccflags-y += -D__CHECK_ENDIAN__
index cedfbda15dad8514b606d3f62c86c81352f6c09d..5a04eb1a7e5762c82109255c2aa035bec9a840dc 100644 (file)
 #include "a2mp.h"
 #include "amp.h"
 
+#define A2MP_FEAT_EXT  0x8000
+
 /* Global AMP Manager list */
-LIST_HEAD(amp_mgr_list);
-DEFINE_MUTEX(amp_mgr_list_lock);
+static LIST_HEAD(amp_mgr_list);
+static DEFINE_MUTEX(amp_mgr_list_lock);
 
 /* A2MP build & send command helper functions */
 static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
@@ -43,7 +45,7 @@ static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
        return cmd;
 }
 
-void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
+static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
 {
        struct l2cap_chan *chan = mgr->a2mp_chan;
        struct a2mp_cmd *cmd;
@@ -67,7 +69,7 @@ void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
        kfree(cmd);
 }
 
-u8 __next_ident(struct amp_mgr *mgr)
+static u8 __next_ident(struct amp_mgr *mgr)
 {
        if (++mgr->ident == 0)
                mgr->ident = 1;
@@ -75,6 +77,23 @@ u8 __next_ident(struct amp_mgr *mgr)
        return mgr->ident;
 }
 
+static struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
+{
+       struct amp_mgr *mgr;
+
+       mutex_lock(&amp_mgr_list_lock);
+       list_for_each_entry(mgr, &amp_mgr_list, list) {
+               if (test_and_clear_bit(state, &mgr->state)) {
+                       amp_mgr_get(mgr);
+                       mutex_unlock(&amp_mgr_list_lock);
+                       return mgr;
+               }
+       }
+       mutex_unlock(&amp_mgr_list_lock);
+
+       return NULL;
+}
+
 /* hci_dev_list shall be locked */
 static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
 {
@@ -860,23 +879,6 @@ struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
        return mgr->a2mp_chan;
 }
 
-struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
-{
-       struct amp_mgr *mgr;
-
-       mutex_lock(&amp_mgr_list_lock);
-       list_for_each_entry(mgr, &amp_mgr_list, list) {
-               if (test_and_clear_bit(state, &mgr->state)) {
-                       amp_mgr_get(mgr);
-                       mutex_unlock(&amp_mgr_list_lock);
-                       return mgr;
-               }
-       }
-       mutex_unlock(&amp_mgr_list_lock);
-
-       return NULL;
-}
-
 void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
 {
        struct amp_mgr *mgr;
index 487b54c1308fdec26ddc48e46b4973200b45022a..296f665adb09d01c0ffc7fe421bf8a75115bf1ff 100644 (file)
@@ -17,8 +17,6 @@
 
 #include <net/bluetooth/l2cap.h>
 
-#define A2MP_FEAT_EXT  0x8000
-
 enum amp_mgr_state {
        READ_LOC_AMP_INFO,
        READ_LOC_AMP_ASSOC,
@@ -131,16 +129,10 @@ struct a2mp_physlink_rsp {
 #define A2MP_STATUS_PHYS_LINK_EXISTS           0x05
 #define A2MP_STATUS_SECURITY_VIOLATION         0x06
 
-extern struct list_head amp_mgr_list;
-extern struct mutex amp_mgr_list_lock;
-
 struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
 int amp_mgr_put(struct amp_mgr *mgr);
-u8 __next_ident(struct amp_mgr *mgr);
 struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
                                       struct sk_buff *skb);
-struct amp_mgr *amp_mgr_lookup_by_state(u8 state);
-void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data);
 void a2mp_discover_amp(struct l2cap_chan *chan);
 void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
 void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
index ce22e0cfa923fb947256f8c36405b7e8bce5c888..70f9d945faf7b439ce7a5283e94bc9687e8e67af 100644 (file)
@@ -210,8 +210,8 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
 }
 EXPORT_SYMBOL(bt_accept_dequeue);
 
-int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-                               struct msghdr *msg, size_t len, int flags)
+int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                   int flags)
 {
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
@@ -283,8 +283,8 @@ static long bt_sock_data_wait(struct sock *sk, long timeo)
        return timeo;
 }
 
-int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
-                              struct msghdr *msg, size_t size, int flags)
+int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+                          size_t size, int flags)
 {
        struct sock *sk = sock->sk;
        int err = 0;
@@ -711,10 +711,9 @@ EXPORT_SYMBOL_GPL(bt_debugfs);
 
 static int __init bt_init(void)
 {
-       struct sk_buff *skb;
        int err;
 
-       BUILD_BUG_ON(sizeof(struct bt_skb_cb) > sizeof(skb->cb));
+       sock_skb_cb_check_size(sizeof(struct bt_skb_cb));
 
        BT_INFO("Core ver %s", VERSION);
 
@@ -750,6 +749,13 @@ static int __init bt_init(void)
                goto sock_err;
        }
 
+       err = mgmt_init();
+       if (err < 0) {
+               sco_exit();
+               l2cap_exit();
+               goto sock_err;
+       }
+
        return 0;
 
 sock_err:
@@ -764,6 +770,8 @@ error:
 
 static void __exit bt_exit(void)
 {
+       mgmt_exit();
+
        sco_exit();
 
        l2cap_exit();
index 5a5b16f365e9baae89f345b22930f1109e5f0b4c..40854c99bc1ecff42e8943ac506b77155ebc4fe1 100644 (file)
@@ -111,6 +111,10 @@ struct bnep_ext_hdr {
 #define BNEPCONNDEL    _IOW('B', 201, int)
 #define BNEPGETCONNLIST        _IOR('B', 210, int)
 #define BNEPGETCONNINFO        _IOR('B', 211, int)
+#define BNEPGETSUPPFEAT        _IOR('B', 212, int)
+
+#define BNEP_SETUP_RESPONSE    0
+#define BNEP_SETUP_RSP_SENT    10
 
 struct bnep_connadd_req {
        int   sock;             /* Connected socket */
index 05f57e491ccbd614a1d306c49df891e4a2ec00c6..1641367e54cadb461903e554c39fabf05997c9de 100644 (file)
@@ -231,7 +231,14 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
                break;
 
        case BNEP_SETUP_CONN_REQ:
-               err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, BNEP_CONN_NOT_ALLOWED);
+               /* Successful response should be sent only once */
+               if (test_bit(BNEP_SETUP_RESPONSE, &s->flags) &&
+                   !test_and_set_bit(BNEP_SETUP_RSP_SENT, &s->flags))
+                       err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP,
+                                           BNEP_SUCCESS);
+               else
+                       err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP,
+                                           BNEP_CONN_NOT_ALLOWED);
                break;
 
        default: {
@@ -239,7 +246,7 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
                        pkt[0] = BNEP_CONTROL;
                        pkt[1] = BNEP_CMD_NOT_UNDERSTOOD;
                        pkt[2] = cmd;
-                       bnep_send(s, pkt, sizeof(pkt));
+                       err = bnep_send(s, pkt, sizeof(pkt));
                }
                break;
        }
@@ -292,29 +299,55 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 {
        struct net_device *dev = s->dev;
        struct sk_buff *nskb;
-       u8 type;
+       u8 type, ctrl_type;
 
        dev->stats.rx_bytes += skb->len;
 
        type = *(u8 *) skb->data;
        skb_pull(skb, 1);
+       ctrl_type = *(u8 *)skb->data;
 
        if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen))
                goto badframe;
 
        if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
-               bnep_rx_control(s, skb->data, skb->len);
-               kfree_skb(skb);
-               return 0;
-       }
+               if (bnep_rx_control(s, skb->data, skb->len) < 0) {
+                       dev->stats.tx_errors++;
+                       kfree_skb(skb);
+                       return 0;
+               }
 
-       skb_reset_mac_header(skb);
+               if (!(type & BNEP_EXT_HEADER)) {
+                       kfree_skb(skb);
+                       return 0;
+               }
 
-       /* Verify and pull out header */
-       if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
-               goto badframe;
+               /* Verify and pull ctrl message since it's already processed */
+               switch (ctrl_type) {
+               case BNEP_SETUP_CONN_REQ:
+                       /* Pull: ctrl type (1 b), len (1 b), data (len bytes) */
+                       if (!skb_pull(skb, 2 + *(u8 *)(skb->data + 1) * 2))
+                               goto badframe;
+                       break;
+               case BNEP_FILTER_MULTI_ADDR_SET:
+               case BNEP_FILTER_NET_TYPE_SET:
+                       /* Pull: ctrl type (1 b), len (2 b), data (len bytes) */
+                       if (!skb_pull(skb, 3 + *(u16 *)(skb->data + 1) * 2))
+                               goto badframe;
+                       break;
+               default:
+                       kfree_skb(skb);
+                       return 0;
+               }
+       } else {
+               skb_reset_mac_header(skb);
 
-       s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
+               /* Verify and pull out header */
+               if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
+                       goto badframe;
+
+               s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
+       }
 
        if (type & BNEP_EXT_HEADER) {
                if (bnep_rx_extension(s, skb) < 0)
@@ -525,6 +558,7 @@ static struct device_type bnep_type = {
 
 int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 {
+       u32 valid_flags = BIT(BNEP_SETUP_RESPONSE);
        struct net_device *dev;
        struct bnep_session *s, *ss;
        u8 dst[ETH_ALEN], src[ETH_ALEN];
@@ -535,6 +569,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
        if (!l2cap_is_socket(sock))
                return -EBADFD;
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
        baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
 
@@ -566,6 +603,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
        s->sock  = sock;
        s->role  = req->role;
        s->state = BT_CONNECTED;
+       s->flags = req->flags;
 
        s->msg.msg_flags = MSG_NOSIGNAL;
 
@@ -611,11 +649,15 @@ failed:
 
 int bnep_del_connection(struct bnep_conndel_req *req)
 {
+       u32 valid_flags = 0;
        struct bnep_session *s;
        int  err = 0;
 
        BT_DBG("");
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        down_read(&bnep_session_sem);
 
        s = __bnep_get_session(req->dst);
@@ -631,10 +673,12 @@ int bnep_del_connection(struct bnep_conndel_req *req)
 
 static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
 {
+       u32 valid_flags = BIT(BNEP_SETUP_RESPONSE);
+
        memset(ci, 0, sizeof(*ci));
        memcpy(ci->dst, s->eh.h_source, ETH_ALEN);
        strcpy(ci->device, s->dev->name);
-       ci->flags = s->flags;
+       ci->flags = s->flags & valid_flags;
        ci->state = s->state;
        ci->role  = s->role;
 }
index 4b488ec261054830c6f45bfe3a6a3a9cfd65f316..6ceb5d36a32bdc375e635d34085a9b016568e16a 100644 (file)
@@ -218,7 +218,7 @@ static const struct net_device_ops bnep_netdev_ops = {
 void bnep_net_setup(struct net_device *dev)
 {
 
-       memset(dev->broadcast, 0xff, ETH_ALEN);
+       eth_broadcast_addr(dev->broadcast);
        dev->addr_len = ETH_ALEN;
 
        ether_setup(dev);
index 5f051290dabab83ec76422dbb7cd44732b22eb6e..bde2bdd9e929e854c9e2d001a7bf9e6a364d6a2c 100644 (file)
@@ -57,6 +57,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
        struct bnep_conninfo ci;
        struct socket *nsock;
        void __user *argp = (void __user *)arg;
+       __u32 supp_feat = BIT(BNEP_SETUP_RESPONSE);
        int err;
 
        BT_DBG("cmd %x arg %lx", cmd, arg);
@@ -120,6 +121,12 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
 
                return err;
 
+       case BNEPGETSUPPFEAT:
+               if (copy_to_user(argp, &supp_feat, sizeof(supp_feat)))
+                       return -EFAULT;
+
+               return 0;
+
        default:
                return -EINVAL;
        }
index 75bd2c42e3e791024abf9d4014fbc41d12dea0da..b0c6c6af76ef07c311ea940d482b3d45ab83696d 100644 (file)
@@ -333,7 +333,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
                return;
        }
 
-       if (session->flags & (1 << CMTP_LOOPBACK)) {
+       if (session->flags & BIT(CMTP_LOOPBACK)) {
                kfree_skb(skb);
                return;
        }
index 278a194e6af488f67197c3725ca937f554989498..298ed37010e691a6f2bb72c506b6e50f9e8676f9 100644 (file)
@@ -75,10 +75,11 @@ static void __cmtp_unlink_session(struct cmtp_session *session)
 
 static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
 {
+       u32 valid_flags = BIT(CMTP_LOOPBACK);
        memset(ci, 0, sizeof(*ci));
        bacpy(&ci->bdaddr, &session->bdaddr);
 
-       ci->flags = session->flags;
+       ci->flags = session->flags & valid_flags;
        ci->state = session->state;
 
        ci->num = session->num;
@@ -313,7 +314,7 @@ static int cmtp_session(void *arg)
 
        down_write(&cmtp_session_sem);
 
-       if (!(session->flags & (1 << CMTP_LOOPBACK)))
+       if (!(session->flags & BIT(CMTP_LOOPBACK)))
                cmtp_detach_device(session);
 
        fput(session->sock->file);
@@ -329,6 +330,7 @@ static int cmtp_session(void *arg)
 
 int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 {
+       u32 valid_flags = BIT(CMTP_LOOPBACK);
        struct cmtp_session *session, *s;
        int i, err;
 
@@ -337,6 +339,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
        if (!l2cap_is_socket(sock))
                return -EBADFD;
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
        if (!session)
                return -ENOMEM;
@@ -385,7 +390,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
                goto unlink;
        }
 
-       if (!(session->flags & (1 << CMTP_LOOPBACK))) {
+       if (!(session->flags & BIT(CMTP_LOOPBACK))) {
                err = cmtp_attach_device(session);
                if (err < 0) {
                        atomic_inc(&session->terminate);
@@ -409,11 +414,15 @@ failed:
 
 int cmtp_del_connection(struct cmtp_conndel_req *req)
 {
+       u32 valid_flags = 0;
        struct cmtp_session *session;
        int err = 0;
 
        BT_DBG("");
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        down_read(&cmtp_session_sem);
 
        session = __cmtp_get_session(&req->bdaddr);
index c9b8fa544785df83d1afa02629b6be00a7325934..ee5e59839b0294a6db0ea95f9f3eda4db0e4c503 100644 (file)
@@ -309,7 +309,7 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
                else
                        hci_add_sco(sco, conn->handle);
        } else {
-               hci_proto_connect_cfm(sco, status);
+               hci_connect_cfm(sco, status);
                hci_conn_del(sco);
        }
 }
@@ -571,7 +571,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 
        list_for_each_entry(d, &hci_dev_list, list) {
                if (!test_bit(HCI_UP, &d->flags) ||
-                   test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
+                   hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
                    d->dev_type != HCI_BREDR)
                        continue;
 
@@ -618,7 +618,7 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
        mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
                            status);
 
-       hci_proto_connect_cfm(conn, status);
+       hci_connect_cfm(conn, status);
 
        hci_conn_del(conn);
 
@@ -700,7 +700,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
-       clear_bit(HCI_LE_ADV, &hdev->dev_flags);
+       hci_dev_clear_flag(hdev, HCI_LE_ADV);
 
        /* Set require_privacy to false so that the remote device has a
         * chance of identifying us.
@@ -733,6 +733,14 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
        struct hci_request req;
        int err;
 
+       /* Let's make sure that le is enabled.*/
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+               if (lmp_le_capable(hdev))
+                       return ERR_PTR(-ECONNREFUSED);
+
+               return ERR_PTR(-EOPNOTSUPP);
+       }
+
        /* Some devices send ATT messages as soon as the physical link is
         * established. To be able to handle these ATT messages, the user-
         * space first establishes the connection and then starts the pairing
@@ -791,7 +799,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
         * anyway have to disable it in order to start directed
         * advertising.
         */
-       if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
                u8 enable = 0x00;
                hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
                            &enable);
@@ -802,7 +810,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                /* If we're active scanning most controllers are unable
                 * to initiate advertising. Simply reject the attempt.
                 */
-               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+               if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
                    hdev->le_scan_type == LE_SCAN_ACTIVE) {
                        skb_queue_purge(&req.cmd_q);
                        hci_conn_del(conn);
@@ -832,9 +840,9 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
         * handler for scan disabling knows to set the correct discovery
         * state.
         */
-       if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                hci_req_add_le_scan_disable(&req);
-               set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
        }
 
        hci_req_add_le_create_conn(&req, conn);
@@ -856,8 +864,12 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
 {
        struct hci_conn *acl;
 
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+               if (lmp_bredr_capable(hdev))
+                       return ERR_PTR(-ECONNREFUSED);
+
                return ERR_PTR(-EOPNOTSUPP);
+       }
 
        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
@@ -930,7 +942,7 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
         * Connections is used and the link is encrypted with AES-CCM
         * using a P-256 authenticated combination key.
         */
-       if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
+       if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
                if (!hci_conn_sc_enabled(conn) ||
                    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
                    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
@@ -1139,7 +1151,7 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
        list_for_each_entry_safe(c, n, &h->list, list) {
                c->state = BT_CLOSED;
 
-               hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
+               hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
                hci_conn_del(c);
        }
 }
index 3322d3f4c85a25eb4bed8dbfaa1802b907b3f1d3..46b114c0140bf6901f94fda107e799a87a10f4db 100644 (file)
@@ -51,7 +51,7 @@ DEFINE_RWLOCK(hci_dev_list_lock);
 
 /* HCI callback list */
 LIST_HEAD(hci_cb_list);
-DEFINE_RWLOCK(hci_cb_list_lock);
+DEFINE_MUTEX(hci_cb_list_lock);
 
 /* HCI ID Numbering */
 static DEFINE_IDA(hci_index_ida);
@@ -80,7 +80,7 @@ static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
        struct hci_dev *hdev = file->private_data;
        char buf[3];
 
-       buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
+       buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -106,7 +106,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
        if (strtobool(buf, &enable))
                return -EINVAL;
 
-       if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
+       if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;
 
        hci_req_lock(hdev);
@@ -127,7 +127,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
        if (err < 0)
                return err;
 
-       change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
+       hci_dev_change_flag(hdev, HCI_DUT_MODE);
 
        return count;
 }
@@ -141,13 +141,16 @@ static const struct file_operations dut_mode_fops = {
 
 /* ---- HCI requests ---- */
 
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
+static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+                                 struct sk_buff *skb)
 {
        BT_DBG("%s result 0x%2.2x", hdev->name, result);
 
        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
+               if (skb)
+                       hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
 }
@@ -163,66 +166,12 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
        }
 }
 
-static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
-                                           u8 event)
-{
-       struct hci_ev_cmd_complete *ev;
-       struct hci_event_hdr *hdr;
-       struct sk_buff *skb;
-
-       hci_dev_lock(hdev);
-
-       skb = hdev->recv_evt;
-       hdev->recv_evt = NULL;
-
-       hci_dev_unlock(hdev);
-
-       if (!skb)
-               return ERR_PTR(-ENODATA);
-
-       if (skb->len < sizeof(*hdr)) {
-               BT_ERR("Too short HCI event");
-               goto failed;
-       }
-
-       hdr = (void *) skb->data;
-       skb_pull(skb, HCI_EVENT_HDR_SIZE);
-
-       if (event) {
-               if (hdr->evt != event)
-                       goto failed;
-               return skb;
-       }
-
-       if (hdr->evt != HCI_EV_CMD_COMPLETE) {
-               BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
-               goto failed;
-       }
-
-       if (skb->len < sizeof(*ev)) {
-               BT_ERR("Too short cmd_complete event");
-               goto failed;
-       }
-
-       ev = (void *) skb->data;
-       skb_pull(skb, sizeof(*ev));
-
-       if (opcode == __le16_to_cpu(ev->opcode))
-               return skb;
-
-       BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
-              __le16_to_cpu(ev->opcode));
-
-failed:
-       kfree_skb(skb);
-       return ERR_PTR(-ENODATA);
-}
-
 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
 {
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
+       struct sk_buff *skb;
        int err = 0;
 
        BT_DBG("%s", hdev->name);
@@ -236,7 +185,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);
 
-       err = hci_req_run(&req, hci_req_sync_complete);
+       err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
@@ -265,13 +214,20 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
        }
 
        hdev->req_status = hdev->req_result = 0;
+       skb = hdev->req_skb;
+       hdev->req_skb = NULL;
 
        BT_DBG("%s end: err %d", hdev->name, err);
 
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(skb);
                return ERR_PTR(err);
+       }
+
+       if (!skb)
+               return ERR_PTR(-ENODATA);
 
-       return hci_get_cmd_complete(hdev, opcode, event);
+       return skb;
 }
 EXPORT_SYMBOL(__hci_cmd_sync_ev);
 
@@ -303,7 +259,7 @@ static int __hci_req_sync(struct hci_dev *hdev,
        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);
 
-       err = hci_req_run(&req, hci_req_sync_complete);
+       err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;
 
@@ -390,7 +346,7 @@ static void bredr_init(struct hci_request *req)
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
 }
 
-static void amp_init(struct hci_request *req)
+static void amp_init1(struct hci_request *req)
 {
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
 
@@ -400,9 +356,6 @@ static void amp_init(struct hci_request *req)
        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
 
-       /* Read Local Supported Features */
-       hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
-
        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
 
@@ -416,6 +369,16 @@ static void amp_init(struct hci_request *req)
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
 }
 
+static void amp_init2(struct hci_request *req)
+{
+       /* Read Local Supported Features. Not all AMP controllers
+        * support this so it's placed conditionally in the second
+        * stage init.
+        */
+       if (req->hdev->commands[14] & 0x20)
+               hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+}
+
 static void hci_init1_req(struct hci_request *req, unsigned long opt)
 {
        struct hci_dev *hdev = req->hdev;
@@ -432,7 +395,7 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
                break;
 
        case HCI_AMP:
-               amp_init(req);
+               amp_init1(req);
                break;
 
        default:
@@ -494,7 +457,7 @@ static void le_setup(struct hci_request *req)
 
        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
-               set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_LE_ENABLED);
 }
 
 static void hci_setup_event_mask(struct hci_request *req)
@@ -578,10 +541,13 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
 {
        struct hci_dev *hdev = req->hdev;
 
+       if (hdev->dev_type == HCI_AMP)
+               return amp_init2(req);
+
        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
-               clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
 
        if (lmp_le_capable(hdev))
                le_setup(req);
@@ -607,7 +573,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
                 */
                hdev->max_page = 0x01;
 
-               if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+               if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;
 
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
@@ -646,7 +612,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
                            sizeof(cp), &cp);
        }
 
-       if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
@@ -683,7 +649,7 @@ static void hci_set_le_support(struct hci_request *req)
 
        memset(&cp, 0, sizeof(cp));
 
-       if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }
@@ -871,7 +837,7 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
 
        /* Enable Secure Connections if supported and configured */
-       if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+       if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;
 
@@ -891,22 +857,22 @@ static int __hci_init(struct hci_dev *hdev)
        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
-       if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }
 
+       err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
+       if (err < 0)
+               return err;
+
        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
-        * first stage init.
+        * first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;
 
-       err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
-       if (err < 0)
-               return err;
-
        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;
@@ -927,8 +893,8 @@ static int __hci_init(struct hci_dev *hdev)
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
-       if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
-           !test_bit(HCI_CONFIG, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+           !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;
 
        hci_debugfs_create_common(hdev);
@@ -1290,12 +1256,12 @@ int hci_inquiry(void __user *arg)
        if (!hdev)
                return -ENODEV;
 
-       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }
 
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }
@@ -1305,7 +1271,7 @@ int hci_inquiry(void __user *arg)
                goto done;
        }
 
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }
@@ -1377,17 +1343,17 @@ static int hci_dev_do_open(struct hci_dev *hdev)
 
        hci_req_lock(hdev);
 
-       if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }
 
-       if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
-           !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
+       if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+           !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
-               if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+               if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }
@@ -1404,7 +1370,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
-               if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
+               if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
@@ -1426,7 +1392,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);
 
-       if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);
 
@@ -1438,7 +1404,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
-                       set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
+                       hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
 
                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
@@ -1448,11 +1414,11 @@ static int hci_dev_do_open(struct hci_dev *hdev)
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
-               if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+               if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }
 
-       if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
@@ -1466,8 +1432,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
        }
 
        if (!ret) {
-               if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
-                   !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+               if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+                   !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }
 
@@ -1475,13 +1441,13 @@ static int hci_dev_do_open(struct hci_dev *hdev)
 
        if (!ret) {
                hci_dev_hold(hdev);
-               set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
-               if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
-                   !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
-                   !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
-                   !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
+               if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+                   !hci_dev_test_flag(hdev, HCI_CONFIG) &&
+                   !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+                   !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
@@ -1533,8 +1499,8 @@ int hci_dev_open(__u16 dev)
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
-           !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+           !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }
@@ -1544,7 +1510,7 @@ int hci_dev_open(__u16 dev)
         * particularly important if the setup procedure has not yet
         * completed.
         */
-       if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+       if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);
 
        /* After this call it is guaranteed that the setup procedure
@@ -1559,9 +1525,9 @@ int hci_dev_open(__u16 dev)
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
-       if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
-           !test_bit(HCI_MGMT, &hdev->dev_flags))
-               set_bit(HCI_BONDABLE, &hdev->dev_flags);
+       if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+           !hci_dev_test_flag(hdev, HCI_MGMT))
+               hci_dev_set_flag(hdev, HCI_BONDABLE);
 
        err = hci_dev_do_open(hdev);
 
@@ -1591,6 +1557,12 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 {
        BT_DBG("%s %p", hdev->name, hdev);
 
+       if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+               /* Execute vendor specific shutdown routine */
+               if (hdev->shutdown)
+                       hdev->shutdown(hdev);
+       }
+
        cancel_delayed_work(&hdev->power_off);
 
        hci_req_cancel(hdev, ENODEV);
@@ -1609,17 +1581,17 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
-               clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
-               clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+               hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        }
 
-       if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+       if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
                cancel_delayed_work(&hdev->service_cache);
 
        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                cancel_delayed_work_sync(&hdev->rpa_expired);
 
        /* Avoid potential lockdep warnings from the *_flush() calls by
@@ -1631,7 +1603,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 
-       if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+       if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                if (hdev->dev_type == HCI_BREDR)
                        mgmt_powered(hdev, 0);
        }
@@ -1651,8 +1623,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
-       if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
-           !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
+       if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
+           !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
@@ -1674,16 +1646,13 @@ static int hci_dev_do_close(struct hci_dev *hdev)
                hdev->sent_cmd = NULL;
        }
 
-       kfree_skb(hdev->recv_evt);
-       hdev->recv_evt = NULL;
-
        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);
 
        /* Clear flags */
        hdev->flags &= BIT(HCI_RAW);
-       hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
+       hci_dev_clear_volatile_flags(hdev);
 
        /* Controller radio is available but is currently powered down */
        hdev->amp_status = AMP_STATUS_POWERED_DOWN;
@@ -1707,12 +1676,12 @@ int hci_dev_close(__u16 dev)
        if (!hdev)
                return -ENODEV;
 
-       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }
 
-       if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+       if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);
 
        err = hci_dev_do_close(hdev);
@@ -1770,12 +1739,12 @@ int hci_dev_reset(__u16 dev)
                goto done;
        }
 
-       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }
 
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }
@@ -1796,12 +1765,12 @@ int hci_dev_reset_stat(__u16 dev)
        if (!hdev)
                return -ENODEV;
 
-       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                ret = -EBUSY;
                goto done;
        }
 
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                ret = -EOPNOTSUPP;
                goto done;
        }
@@ -1820,29 +1789,29 @@ static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
        BT_DBG("%s scan 0x%02x", hdev->name, scan);
 
        if ((scan & SCAN_PAGE))
-               conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
-                                                &hdev->dev_flags);
+               conn_changed = !hci_dev_test_and_set_flag(hdev,
+                                                         HCI_CONNECTABLE);
        else
-               conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
-                                                 &hdev->dev_flags);
+               conn_changed = hci_dev_test_and_clear_flag(hdev,
+                                                          HCI_CONNECTABLE);
 
        if ((scan & SCAN_INQUIRY)) {
-               discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
-                                                  &hdev->dev_flags);
+               discov_changed = !hci_dev_test_and_set_flag(hdev,
+                                                           HCI_DISCOVERABLE);
        } else {
-               clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
-               discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
-                                                   &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+               discov_changed = hci_dev_test_and_clear_flag(hdev,
+                                                            HCI_DISCOVERABLE);
        }
 
-       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;
 
        if (conn_changed || discov_changed) {
                /* In case this was disabled through mgmt */
-               set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
 
-               if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+               if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                        mgmt_update_adv_data(hdev);
 
                mgmt_new_settings(hdev);
@@ -1862,12 +1831,12 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
        if (!hdev)
                return -ENODEV;
 
-       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }
 
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }
@@ -1877,7 +1846,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
                goto done;
        }
 
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }
@@ -1981,7 +1950,7 @@ int hci_get_dev_list(void __user *arg)
                 * is running, but in that case still indicate that the
                 * device is actually down.
                 */
-               if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+               if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                        flags &= ~BIT(HCI_UP);
 
                (dr + n)->dev_id  = hdev->id;
@@ -2019,7 +1988,7 @@ int hci_get_dev_info(void __user *arg)
         * is running, but in that case still indicate that the
         * device is actually down.
         */
-       if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                flags = hdev->flags & ~BIT(HCI_UP);
        else
                flags = hdev->flags;
@@ -2062,16 +2031,16 @@ static int hci_rfkill_set_block(void *data, bool blocked)
 
        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
 
-       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;
 
        if (blocked) {
-               set_bit(HCI_RFKILLED, &hdev->dev_flags);
-               if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
-                   !test_bit(HCI_CONFIG, &hdev->dev_flags))
+               hci_dev_set_flag(hdev, HCI_RFKILLED);
+               if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+                   !hci_dev_test_flag(hdev, HCI_CONFIG))
                        hci_dev_do_close(hdev);
        } else {
-               clear_bit(HCI_RFKILLED, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_RFKILLED);
        }
 
        return 0;
@@ -2100,23 +2069,23 @@ static void hci_power_on(struct work_struct *work)
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
-       if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
-           test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
+       if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
+           hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
            (hdev->dev_type == HCI_BREDR &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
-               clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
                hci_dev_do_close(hdev);
-       } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+       } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }
 
-       if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
+       if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
                /* For unconfigured devices, set the HCI_RAW flag
                 * so that userspace can easily identify them.
                 */
-               if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+               if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        set_bit(HCI_RAW, &hdev->flags);
 
                /* For fully configured devices, this will send
@@ -2127,11 +2096,11 @@ static void hci_power_on(struct work_struct *work)
                 * and no event will be send.
                 */
                mgmt_index_added(hdev);
-       } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
+       } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
                /* When the controller is now configured, then it
                 * is important to clear the HCI_RAW flag.
                 */
-               if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+               if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        clear_bit(HCI_RAW, &hdev->flags);
 
                /* Powering on the controller with HCI_CONFIG set only
@@ -2500,6 +2469,42 @@ void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
        }
 }
 
+bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+       struct smp_ltk *k;
+       struct smp_irk *irk;
+       u8 addr_type;
+
+       if (type == BDADDR_BREDR) {
+               if (hci_find_link_key(hdev, bdaddr))
+                       return true;
+               return false;
+       }
+
+       /* Convert to HCI addr type which struct smp_ltk uses */
+       if (type == BDADDR_LE_PUBLIC)
+               addr_type = ADDR_LE_DEV_PUBLIC;
+       else
+               addr_type = ADDR_LE_DEV_RANDOM;
+
+       irk = hci_get_irk(hdev, bdaddr, addr_type);
+       if (irk) {
+               bdaddr = &irk->bdaddr;
+               addr_type = irk->addr_type;
+       }
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
+               if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
+                       rcu_read_unlock();
+                       return true;
+               }
+       }
+       rcu_read_unlock();
+
+       return false;
+}
+
 /* HCI command timer function */
 static void hci_cmd_timeout(struct work_struct *work)
 {
@@ -2822,7 +2827,6 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
 {
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
-       struct hci_request req;
        struct hci_cp_inquiry cp;
        int err;
 
@@ -2841,21 +2845,37 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
                break;
 
        case DISCOV_TYPE_INTERLEAVED:
-               hci_req_init(&req, hdev);
+               hci_dev_lock(hdev);
 
-               memset(&cp, 0, sizeof(cp));
-               memcpy(&cp.lap, lap, sizeof(cp.lap));
-               cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
-               hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+               if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+                            &hdev->quirks)) {
+                       /* If we were running LE only scan, change discovery
+                        * state. If we were running both LE and BR/EDR inquiry
+                        * simultaneously, and BR/EDR inquiry is already
+                        * finished, stop discovery, otherwise BR/EDR inquiry
+                        * will stop discovery when finished.
+                        */
+                       if (!test_bit(HCI_INQUIRY, &hdev->flags))
+                               hci_discovery_set_state(hdev,
+                                                       DISCOVERY_STOPPED);
+               } else {
+                       struct hci_request req;
 
-               hci_dev_lock(hdev);
+                       hci_inquiry_cache_flush(hdev);
 
-               hci_inquiry_cache_flush(hdev);
+                       hci_req_init(&req, hdev);
 
-               err = hci_req_run(&req, inquiry_complete);
-               if (err) {
-                       BT_ERR("Inquiry request failed: err %d", err);
-                       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+                       memset(&cp, 0, sizeof(cp));
+                       memcpy(&cp.lap, lap, sizeof(cp.lap));
+                       cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
+                       hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+
+                       err = hci_req_run(&req, inquiry_complete);
+                       if (err) {
+                               BT_ERR("Inquiry request failed: err %d", err);
+                               hci_discovery_set_state(hdev,
+                                                       DISCOVERY_STOPPED);
+                       }
                }
 
                hci_dev_unlock(hdev);
@@ -2934,7 +2954,7 @@ static void le_scan_restart_work(struct work_struct *work)
        BT_DBG("%s", hdev->name);
 
        /* If controller is not scanning we are done. */
-       if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;
 
        hci_req_init(&req, hdev);
@@ -2967,9 +2987,9 @@ static void le_scan_restart_work(struct work_struct *work)
 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 *bdaddr_type)
 {
-       if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
+       if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
-           (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+           (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                bacpy(bdaddr, &hdev->static_addr);
                *bdaddr_type = ADDR_LE_DEV_RANDOM;
@@ -3059,6 +3079,7 @@ struct hci_dev *hci_alloc_dev(void)
 
        hci_init_sysfs(hdev);
        discovery_init(hdev);
+       adv_info_init(hdev);
 
        return hdev;
 }
@@ -3137,16 +3158,16 @@ int hci_register_dev(struct hci_dev *hdev)
        }
 
        if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
-               set_bit(HCI_RFKILLED, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_RFKILLED);
 
-       set_bit(HCI_SETUP, &hdev->dev_flags);
-       set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+       hci_dev_set_flag(hdev, HCI_SETUP);
+       hci_dev_set_flag(hdev, HCI_AUTO_OFF);
 
        if (hdev->dev_type == HCI_BREDR) {
                /* Assume BR/EDR support until proven otherwise (such as
                 * through reading supported features during init.
                 */
-               set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
        }
 
        write_lock(&hci_dev_list_lock);
@@ -3157,7 +3178,7 @@ int hci_register_dev(struct hci_dev *hdev)
         * and should not be included in normal operation.
         */
        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
-               set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
 
        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);
@@ -3183,7 +3204,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
 
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
-       set_bit(HCI_UNREGISTER, &hdev->dev_flags);
+       hci_dev_set_flag(hdev, HCI_UNREGISTER);
 
        id = hdev->id;
 
@@ -3199,8 +3220,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
        cancel_work_sync(&hdev->power_on);
 
        if (!test_bit(HCI_INIT, &hdev->flags) &&
-           !test_bit(HCI_SETUP, &hdev->dev_flags) &&
-           !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
+           !hci_dev_test_flag(hdev, HCI_SETUP) &&
+           !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
@@ -3448,9 +3469,9 @@ int hci_register_cb(struct hci_cb *cb)
 {
        BT_DBG("%p name %s", cb, cb->name);
 
-       write_lock(&hci_cb_list_lock);
-       list_add(&cb->list, &hci_cb_list);
-       write_unlock(&hci_cb_list_lock);
+       mutex_lock(&hci_cb_list_lock);
+       list_add_tail(&cb->list, &hci_cb_list);
+       mutex_unlock(&hci_cb_list_lock);
 
        return 0;
 }
@@ -3460,9 +3481,9 @@ int hci_unregister_cb(struct hci_cb *cb)
 {
        BT_DBG("%p name %s", cb, cb->name);
 
-       write_lock(&hci_cb_list_lock);
+       mutex_lock(&hci_cb_list_lock);
        list_del(&cb->list);
-       write_unlock(&hci_cb_list_lock);
+       mutex_unlock(&hci_cb_list_lock);
 
        return 0;
 }
@@ -3495,11 +3516,6 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        }
 }
 
-bool hci_req_pending(struct hci_dev *hdev)
-{
-       return (hdev->req_status == HCI_REQ_PEND);
-}
-
 /* Send HCI command */
 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
                 const void *param)
@@ -3874,7 +3890,7 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
 
 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
 {
-       if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+       if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
@@ -4057,7 +4073,7 @@ static void hci_sched_le(struct hci_dev *hdev)
        if (!hci_conn_num(hdev, LE_LINK))
                return;
 
-       if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+       if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
@@ -4105,7 +4121,7 @@ static void hci_tx_work(struct work_struct *work)
        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);
 
-       if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+       if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
@@ -4220,9 +4236,10 @@ static void hci_resend_last(struct hci_dev *hdev)
        queue_work(hdev->workqueue, &hdev->cmd_work);
 }
 
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+                         hci_req_complete_t *req_complete,
+                         hci_req_complete_skb_t *req_complete_skb)
 {
-       hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;
 
@@ -4254,18 +4271,14 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
-       if (hdev->sent_cmd) {
-               req_complete = bt_cb(hdev->sent_cmd)->req.complete;
-
-               if (req_complete) {
-                       /* We must set the complete callback to NULL to
-                        * avoid calling the callback more than once if
-                        * this function gets called again.
-                        */
-                       bt_cb(hdev->sent_cmd)->req.complete = NULL;
+       if (bt_cb(hdev->sent_cmd)->req.complete) {
+               *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
+               return;
+       }
 
-                       goto call_complete;
-               }
+       if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
+               *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
+               return;
        }
 
        /* Remove all pending commands belonging to this request */
@@ -4276,14 +4289,11 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
                        break;
                }
 
-               req_complete = bt_cb(skb)->req.complete;
+               *req_complete = bt_cb(skb)->req.complete;
+               *req_complete_skb = bt_cb(skb)->req.complete_skb;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
-
-call_complete:
-       if (req_complete)
-               req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
 }
 
 static void hci_rx_work(struct work_struct *work)
@@ -4302,7 +4312,7 @@ static void hci_rx_work(struct work_struct *work)
                        hci_send_to_sock(hdev, skb);
                }
 
-               if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        kfree_skb(skb);
                        continue;
                }
index 65261e5d4b84bbbdc1da1d11083b9f32b6233d7a..7db4220941cc60dfed6c5ca4043e7158e4458387 100644 (file)
 
 #include "hci_debugfs.h"
 
+#define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk)                                      \
+static ssize_t __name ## _read(struct file *file,                            \
+                               char __user *user_buf,                        \
+                               size_t count, loff_t *ppos)                   \
+{                                                                            \
+       struct hci_dev *hdev = file->private_data;                            \
+       char buf[3];                                                          \
+                                                                             \
+       buf[0] = test_bit(__quirk, &hdev->quirks) ? 'Y' : 'N';                \
+       buf[1] = '\n';                                                        \
+       buf[2] = '\0';                                                        \
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);        \
+}                                                                            \
+                                                                             \
+static ssize_t __name ## _write(struct file *file,                           \
+                                const char __user *user_buf,                 \
+                                size_t count, loff_t *ppos)                  \
+{                                                                            \
+       struct hci_dev *hdev = file->private_data;                            \
+       char buf[32];                                                         \
+       size_t buf_size = min(count, (sizeof(buf) - 1));                      \
+       bool enable;                                                          \
+                                                                             \
+       if (test_bit(HCI_UP, &hdev->flags))                                   \
+               return -EBUSY;                                                \
+                                                                             \
+       if (copy_from_user(buf, user_buf, buf_size))                          \
+               return -EFAULT;                                               \
+                                                                             \
+       buf[buf_size] = '\0';                                                 \
+       if (strtobool(buf, &enable))                                          \
+               return -EINVAL;                                               \
+                                                                             \
+       if (enable == test_bit(__quirk, &hdev->quirks))                       \
+               return -EALREADY;                                             \
+                                                                             \
+       change_bit(__quirk, &hdev->quirks);                                   \
+                                                                             \
+       return count;                                                         \
+}                                                                            \
+                                                                             \
+static const struct file_operations __name ## _fops = {                              \
+       .open           = simple_open,                                        \
+       .read           = __name ## _read,                                    \
+       .write          = __name ## _write,                                   \
+       .llseek         = default_llseek,                                     \
+}                                                                            \
+
 static int features_show(struct seq_file *f, void *ptr)
 {
        struct hci_dev *hdev = f->private;
@@ -66,6 +114,30 @@ static const struct file_operations features_fops = {
        .release        = single_release,
 };
 
+static int device_id_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+
+       hci_dev_lock(hdev);
+       seq_printf(f, "%4.4x:%4.4x:%4.4x:%4.4x\n", hdev->devid_source,
+                 hdev->devid_vendor, hdev->devid_product, hdev->devid_version);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int device_id_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, device_id_show, inode->i_private);
+}
+
+static const struct file_operations device_id_fops = {
+       .open           = device_id_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static int device_list_show(struct seq_file *f, void *ptr)
 {
        struct hci_dev *hdev = f->private;
@@ -166,7 +238,7 @@ static int remote_oob_show(struct seq_file *f, void *ptr)
                seq_printf(f, "%pMR (type %u) %u %*phN %*phN %*phN %*phN\n",
                           &data->bdaddr, data->bdaddr_type, data->present,
                           16, data->hash192, 16, data->rand192,
-                          16, data->hash256, 19, data->rand256);
+                          16, data->hash256, 16, data->rand256);
        }
        hci_dev_unlock(hdev);
 
@@ -247,7 +319,7 @@ static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
        struct hci_dev *hdev = file->private_data;
        char buf[3];
 
-       buf[0] = test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
+       buf[0] = hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -265,7 +337,7 @@ static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
        struct hci_dev *hdev = file->private_data;
        char buf[3];
 
-       buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
+       buf[0] = hci_dev_test_flag(hdev, HCI_SC_ONLY) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -287,6 +359,8 @@ void hci_debugfs_create_common(struct hci_dev *hdev)
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_u8("hardware_error", 0444, hdev->debugfs,
                          &hdev->hw_error_code);
+       debugfs_create_file("device_id", 0444, hdev->debugfs, hdev,
+                           &device_id_fops);
 
        debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
                            &device_list_fops);
@@ -679,7 +753,7 @@ static ssize_t force_static_address_read(struct file *file,
        struct hci_dev *hdev = file->private_data;
        char buf[3];
 
-       buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
+       buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -704,10 +778,10 @@ static ssize_t force_static_address_write(struct file *file,
        if (strtobool(buf, &enable))
                return -EINVAL;
 
-       if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
+       if (enable == hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR))
                return -EALREADY;
 
-       change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
+       hci_dev_change_flag(hdev, HCI_FORCE_STATIC_ADDR);
 
        return count;
 }
@@ -997,6 +1071,11 @@ static int adv_max_interval_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
                        adv_max_interval_set, "%llu\n");
 
+DEFINE_QUIRK_ATTRIBUTE(quirk_strict_duplicate_filter,
+                      HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+DEFINE_QUIRK_ATTRIBUTE(quirk_simultaneous_discovery,
+                      HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+
 void hci_debugfs_create_le(struct hci_dev *hdev)
 {
        debugfs_create_file("identity", 0400, hdev->debugfs, hdev,
@@ -1041,6 +1120,13 @@ void hci_debugfs_create_le(struct hci_dev *hdev)
                            &adv_max_interval_fops);
        debugfs_create_u16("discov_interleaved_timeout", 0644, hdev->debugfs,
                           &hdev->discov_interleaved_timeout);
+
+       debugfs_create_file("quirk_strict_duplicate_filter", 0644,
+                           hdev->debugfs, hdev,
+                           &quirk_strict_duplicate_filter_fops);
+       debugfs_create_file("quirk_simultaneous_discovery", 0644,
+                           hdev->debugfs, hdev,
+                           &quirk_simultaneous_discovery_fops);
 }
 
 void hci_debugfs_create_conn(struct hci_conn *conn)
index fb68efe083c55c9c05d1716c04f8b3d380a2eb78..4444dc8cedc21a6f8237e31adf337feefe477c11 100644 (file)
    SOFTWARE IS DISCLAIMED.
 */
 
+#if IS_ENABLED(CONFIG_BT_DEBUGFS)
+
 void hci_debugfs_create_common(struct hci_dev *hdev);
 void hci_debugfs_create_bredr(struct hci_dev *hdev);
 void hci_debugfs_create_le(struct hci_dev *hdev);
 void hci_debugfs_create_conn(struct hci_conn *conn);
+
+#else
+
+static inline void hci_debugfs_create_common(struct hci_dev *hdev)
+{
+}
+
+static inline void hci_debugfs_create_bredr(struct hci_dev *hdev)
+{
+}
+
+static inline void hci_debugfs_create_le(struct hci_dev *hdev)
+{
+}
+
+static inline void hci_debugfs_create_conn(struct hci_conn *conn)
+{
+}
+
+#endif
index a3fb094822b621e5ef3b3205d1d5fce7c9d3f6b8..01031038eb0e489a43c3484b0b79483fe617195b 100644 (file)
@@ -70,7 +70,7 @@ static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
        if (status)
                return;
 
-       set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+       hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
 }
 
 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
@@ -82,7 +82,7 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
        if (status)
                return;
 
-       clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+       hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
 
        hci_conn_check_pending(hdev);
 }
@@ -198,7 +198,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
                return;
 
        /* Reset all non-persistent flags */
-       hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
+       hci_dev_clear_volatile_flags(hdev);
 
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 
@@ -265,7 +265,7 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_set_local_name_complete(hdev, sent, status);
        else if (!status)
                memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
@@ -282,8 +282,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
        if (rp->status)
                return;
 
-       if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
-           test_bit(HCI_CONFIG, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_SETUP) ||
+           hci_dev_test_flag(hdev, HCI_CONFIG))
                memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
 }
 
@@ -309,7 +309,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
                        clear_bit(HCI_AUTH, &hdev->flags);
        }
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_auth_enable_complete(hdev, status);
 
        hci_dev_unlock(hdev);
@@ -404,7 +404,7 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
        if (status == 0)
                memcpy(hdev->dev_class, sent, 3);
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_set_class_of_dev_complete(hdev, sent, status);
 
        hci_dev_unlock(hdev);
@@ -497,13 +497,13 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
                        hdev->features[1][0] &= ~LMP_HOST_SSP;
        }
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_ssp_enable_complete(hdev, sent->mode, status);
        else if (!status) {
                if (sent->mode)
-                       set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+                       hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
                else
-                       clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+                       hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
        }
 
        hci_dev_unlock(hdev);
@@ -529,11 +529,11 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
                        hdev->features[1][0] &= ~LMP_HOST_SC;
        }
 
-       if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) {
+       if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
                if (sent->support)
-                       set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+                       hci_dev_set_flag(hdev, HCI_SC_ENABLED);
                else
-                       clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+                       hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
        }
 
        hci_dev_unlock(hdev);
@@ -548,8 +548,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
        if (rp->status)
                return;
 
-       if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
-           test_bit(HCI_CONFIG, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_SETUP) ||
+           hci_dev_test_flag(hdev, HCI_CONFIG)) {
                hdev->hci_ver = rp->hci_ver;
                hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
                hdev->lmp_ver = rp->lmp_ver;
@@ -568,8 +568,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
        if (rp->status)
                return;
 
-       if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
-           test_bit(HCI_CONFIG, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_SETUP) ||
+           hci_dev_test_flag(hdev, HCI_CONFIG))
                memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
 }
 
@@ -691,7 +691,7 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
        if (test_bit(HCI_INIT, &hdev->flags))
                bacpy(&hdev->bdaddr, &rp->bdaddr);
 
-       if (test_bit(HCI_SETUP, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_SETUP))
                bacpy(&hdev->setup_addr, &rp->bdaddr);
 }
 
@@ -900,7 +900,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
 
        if (rp->status)
@@ -926,7 +926,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
                                                 rp->status);
 
@@ -985,7 +985,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
                                                 rp->status);
 
@@ -1001,7 +1001,7 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
 
        hci_dev_lock(hdev);
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
                                                     ACL_LINK, 0, rp->status);
 
@@ -1016,7 +1016,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
                                                 0, rp->status);
 
@@ -1032,7 +1032,7 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
 
        hci_dev_lock(hdev);
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
                                                     ACL_LINK, 0, rp->status);
 
@@ -1045,11 +1045,6 @@ static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
        struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
-       hci_dev_lock(hdev);
-       mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
-                                         rp->status);
-       hci_dev_unlock(hdev);
 }
 
 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
@@ -1058,15 +1053,8 @@ static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
        struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
-       hci_dev_lock(hdev);
-       mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
-                                         rp->hash256, rp->rand256,
-                                         rp->status);
-       hci_dev_unlock(hdev);
 }
 
-
 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
 {
        __u8 status = *((__u8 *) skb->data);
@@ -1109,7 +1097,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
        if (*sent) {
                struct hci_conn *conn;
 
-               set_bit(HCI_LE_ADV, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_LE_ADV);
 
                conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (conn)
@@ -1117,7 +1105,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
                                           &conn->le_conn_timeout,
                                           conn->conn_timeout);
        } else {
-               clear_bit(HCI_LE_ADV, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_LE_ADV);
        }
 
        hci_dev_unlock(hdev);
@@ -1192,7 +1180,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
 
        switch (cp->enable) {
        case LE_SCAN_ENABLE:
-               set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_LE_SCAN);
                if (hdev->le_scan_type == LE_SCAN_ACTIVE)
                        clear_pending_adv_report(hdev);
                break;
@@ -1217,7 +1205,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
                 */
                cancel_delayed_work(&hdev->le_scan_disable);
 
-               clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_LE_SCAN);
 
                /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
                 * interrupted scanning due to a connect request. Mark
@@ -1226,10 +1214,9 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
                 * been disabled because of active scanning, so
                 * re-enable it again if necessary.
                 */
-               if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
-                                      &hdev->dev_flags))
+               if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-               else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
+               else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
                         hdev->discovery.state == DISCOVERY_FINDING)
                        mgmt_reenable_advertising(hdev);
 
@@ -1388,11 +1375,11 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
 
        if (sent->le) {
                hdev->features[1][0] |= LMP_HOST_LE;
-               set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_LE_ENABLED);
        } else {
                hdev->features[1][0] &= ~LMP_HOST_LE;
-               clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
-               clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
+               hci_dev_clear_flag(hdev, HCI_ADVERTISING);
        }
 
        if (sent->simul)
@@ -1537,7 +1524,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
                if (conn && conn->state == BT_CONNECT) {
                        if (status != 0x0c || conn->attempt > 2) {
                                conn->state = BT_CLOSED;
-                               hci_proto_connect_cfm(conn, status);
+                               hci_connect_cfm(conn, status);
                                hci_conn_del(conn);
                        } else
                                conn->state = BT_CONNECT2;
@@ -1581,7 +1568,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
                if (sco) {
                        sco->state = BT_CLOSED;
 
-                       hci_proto_connect_cfm(sco, status);
+                       hci_connect_cfm(sco, status);
                        hci_conn_del(sco);
                }
        }
@@ -1608,7 +1595,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
        if (conn) {
                if (conn->state == BT_CONFIG) {
-                       hci_proto_connect_cfm(conn, status);
+                       hci_connect_cfm(conn, status);
                        hci_conn_drop(conn);
                }
        }
@@ -1635,7 +1622,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
        if (conn) {
                if (conn->state == BT_CONFIG) {
-                       hci_proto_connect_cfm(conn, status);
+                       hci_connect_cfm(conn, status);
                        hci_conn_drop(conn);
                }
        }
@@ -1769,7 +1756,7 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
 
        if (!conn)
@@ -1811,7 +1798,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
        if (conn) {
                if (conn->state == BT_CONFIG) {
-                       hci_proto_connect_cfm(conn, status);
+                       hci_connect_cfm(conn, status);
                        hci_conn_drop(conn);
                }
        }
@@ -1838,7 +1825,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
        if (conn) {
                if (conn->state == BT_CONFIG) {
-                       hci_proto_connect_cfm(conn, status);
+                       hci_connect_cfm(conn, status);
                        hci_conn_drop(conn);
                }
        }
@@ -1873,7 +1860,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
                if (sco) {
                        sco->state = BT_CLOSED;
 
-                       hci_proto_connect_cfm(sco, status);
+                       hci_connect_cfm(sco, status);
                        hci_conn_del(sco);
                }
        }
@@ -2118,7 +2105,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
        wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
-       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;
 
        hci_dev_lock(hdev);
@@ -2127,7 +2114,16 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                goto unlock;
 
        if (list_empty(&discov->resolve)) {
-               hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+               /* When BR/EDR inquiry is active and no LE scanning is in
+                * progress, then change discovery state to indicate completion.
+                *
+                * When running LE scanning and BR/EDR inquiry simultaneously
+                * and the LE scan already finished, then change the discovery
+                * state to indicate completion.
+                */
+               if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+                   !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
+                       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                goto unlock;
        }
 
@@ -2136,7 +2132,16 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                e->name_state = NAME_PENDING;
                hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
        } else {
-               hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+               /* When BR/EDR inquiry is active and no LE scanning is in
+                * progress, then change discovery state to indicate completion.
+                *
+                * When running LE scanning and BR/EDR inquiry simultaneously
+                * and the LE scan already finished, then change the discovery
+                * state to indicate completion.
+                */
+               if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+                   !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
+                       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        }
 
 unlock:
@@ -2154,7 +2159,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
        if (!num_rsp)
                return;
 
-       if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
                return;
 
        hci_dev_lock(hdev);
@@ -2255,10 +2260,10 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_sco_setup(conn, ev->status);
 
        if (ev->status) {
-               hci_proto_connect_cfm(conn, ev->status);
+               hci_connect_cfm(conn, ev->status);
                hci_conn_del(conn);
        } else if (ev->link_type != ACL_LINK)
-               hci_proto_connect_cfm(conn, ev->status);
+               hci_connect_cfm(conn, ev->status);
 
 unlock:
        hci_dev_unlock(hdev);
@@ -2304,8 +2309,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
         * connection. These features are only touched through mgmt so
         * only do the checks if HCI_MGMT is set.
         */
-       if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
-           !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
+       if (hci_dev_test_flag(hdev, HCI_MGMT) &&
+           !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
            !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
                                    BDADDR_BREDR)) {
                    hci_reject_conn(hdev, &ev->bdaddr);
@@ -2366,7 +2371,7 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                             &cp);
        } else {
                conn->state = BT_CONNECT2;
-               hci_proto_connect_cfm(conn, 0);
+               hci_connect_cfm(conn, 0);
        }
 }
 
@@ -2444,7 +2449,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        type = conn->type;
 
-       hci_proto_disconn_cfm(conn, ev->reason);
+       hci_disconn_cfm(conn, ev->reason);
        hci_conn_del(conn);
 
        /* Re-enable advertising if necessary, since it might
@@ -2501,7 +2506,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                                     &cp);
                } else {
                        conn->state = BT_CONNECTED;
-                       hci_proto_connect_cfm(conn, ev->status);
+                       hci_connect_cfm(conn, ev->status);
                        hci_conn_drop(conn);
                }
        } else {
@@ -2542,7 +2547,7 @@ static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
 
-       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_MGMT))
                goto check_auth;
 
        if (ev->status == 0)
@@ -2608,7 +2613,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
         * whenever the encryption procedure fails.
         */
        if (ev->status && conn->type == LE_LINK)
-               set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
 
        clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
 
@@ -2626,15 +2631,15 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
                 * connections that are not encrypted with AES-CCM
                 * using a P-256 authenticated combination key.
                 */
-               if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
+               if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
                    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
                     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
-                       hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
+                       hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
                        hci_conn_drop(conn);
                        goto unlock;
                }
 
-               hci_proto_connect_cfm(conn, ev->status);
+               hci_connect_cfm(conn, ev->status);
                hci_conn_drop(conn);
        } else
                hci_encrypt_cfm(conn, ev->status, ev->encrypt);
@@ -2707,7 +2712,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev,
 
        if (!hci_outgoing_auth_needed(hdev, conn)) {
                conn->state = BT_CONNECTED;
-               hci_proto_connect_cfm(conn, ev->status);
+               hci_connect_cfm(conn, ev->status);
                hci_conn_drop(conn);
        }
 
@@ -2715,17 +2720,19 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
-static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
+                                u16 *opcode, u8 *status,
+                                hci_req_complete_t *req_complete,
+                                hci_req_complete_skb_t *req_complete_skb)
 {
        struct hci_ev_cmd_complete *ev = (void *) skb->data;
-       u8 status = skb->data[sizeof(*ev)];
-       __u16 opcode;
 
-       skb_pull(skb, sizeof(*ev));
+       *opcode = __le16_to_cpu(ev->opcode);
+       *status = skb->data[sizeof(*ev)];
 
-       opcode = __le16_to_cpu(ev->opcode);
+       skb_pull(skb, sizeof(*ev));
 
-       switch (opcode) {
+       switch (*opcode) {
        case HCI_OP_INQUIRY_CANCEL:
                hci_cc_inquiry_cancel(hdev, skb);
                break;
@@ -3003,32 +3010,36 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                break;
 
        default:
-               BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+               BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
                break;
        }
 
-       if (opcode != HCI_OP_NOP)
+       if (*opcode != HCI_OP_NOP)
                cancel_delayed_work(&hdev->cmd_timer);
 
-       hci_req_cmd_complete(hdev, opcode, status);
-
-       if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
+       if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
                atomic_set(&hdev->cmd_cnt, 1);
-               if (!skb_queue_empty(&hdev->cmd_q))
-                       queue_work(hdev->workqueue, &hdev->cmd_work);
-       }
+
+       hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
+                            req_complete_skb);
+
+       if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
+               queue_work(hdev->workqueue, &hdev->cmd_work);
 }
 
-static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
+                              u16 *opcode, u8 *status,
+                              hci_req_complete_t *req_complete,
+                              hci_req_complete_skb_t *req_complete_skb)
 {
        struct hci_ev_cmd_status *ev = (void *) skb->data;
-       __u16 opcode;
 
        skb_pull(skb, sizeof(*ev));
 
-       opcode = __le16_to_cpu(ev->opcode);
+       *opcode = __le16_to_cpu(ev->opcode);
+       *status = ev->status;
 
-       switch (opcode) {
+       switch (*opcode) {
        case HCI_OP_INQUIRY:
                hci_cs_inquiry(hdev, ev->status);
                break;
@@ -3098,22 +3109,29 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
                break;
 
        default:
-               BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+               BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
                break;
        }
 
-       if (opcode != HCI_OP_NOP)
+       if (*opcode != HCI_OP_NOP)
                cancel_delayed_work(&hdev->cmd_timer);
 
+       if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
+               atomic_set(&hdev->cmd_cnt, 1);
+
+       /* Indicate request completion if the command failed. Also, if
+        * we're not waiting for a special event and we get a success
+        * command status we should try to flag the request as completed
+        * (since for this kind of commands there will not be a command
+        * complete event).
+        */
        if (ev->status ||
            (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
-               hci_req_cmd_complete(hdev, opcode, ev->status);
+               hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
+                                    req_complete_skb);
 
-       if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
-               atomic_set(&hdev->cmd_cnt, 1);
-               if (!skb_queue_empty(&hdev->cmd_q))
-                       queue_work(hdev->workqueue, &hdev->cmd_work);
-       }
+       if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
+               queue_work(hdev->workqueue, &hdev->cmd_work);
 }
 
 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3331,11 +3349,11 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_conn_drop(conn);
        }
 
-       if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
+       if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
            !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
                hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
                             sizeof(ev->bdaddr), &ev->bdaddr);
-       } else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
+       } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
                u8 secure;
 
                if (conn->pending_sec_level == BT_SECURITY_HIGH)
@@ -3391,7 +3409,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s", hdev->name);
 
-       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;
 
        hci_dev_lock(hdev);
@@ -3465,7 +3483,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
        set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
        conn_set_key(conn, ev->key_type, conn->pin_length);
 
-       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_MGMT))
                goto unlock;
 
        key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
@@ -3487,7 +3505,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
         * store_hint being 0).
         */
        if (key->type == HCI_LK_DEBUG_COMBINATION &&
-           !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
+           !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
                goto unlock;
@@ -3570,7 +3588,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
        if (!num_rsp)
                return;
 
-       if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
                return;
 
        hci_dev_lock(hdev);
@@ -3679,7 +3697,7 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev,
 
        if (!hci_outgoing_auth_needed(hdev, conn)) {
                conn->state = BT_CONNECTED;
-               hci_proto_connect_cfm(conn, ev->status);
+               hci_connect_cfm(conn, ev->status);
                hci_conn_drop(conn);
        }
 
@@ -3738,7 +3756,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
                break;
        }
 
-       hci_proto_connect_cfm(conn, ev->status);
+       hci_connect_cfm(conn, ev->status);
        if (ev->status)
                hci_conn_del(conn);
 
@@ -3776,7 +3794,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
        if (!num_rsp)
                return;
 
-       if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
                return;
 
        hci_dev_lock(hdev);
@@ -3794,7 +3812,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
                data.rssi               = info->rssi;
                data.ssp_mode           = 0x01;
 
-               if (test_bit(HCI_MGMT, &hdev->dev_flags))
+               if (hci_dev_test_flag(hdev, HCI_MGMT))
                        name_known = eir_has_data_type(info->data,
                                                       sizeof(info->data),
                                                       EIR_NAME_COMPLETE);
@@ -3849,7 +3867,7 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
                if (!ev->status)
                        conn->state = BT_CONNECTED;
 
-               hci_proto_connect_cfm(conn, ev->status);
+               hci_connect_cfm(conn, ev->status);
                hci_conn_drop(conn);
        } else {
                hci_auth_cfm(conn, ev->status);
@@ -3890,41 +3908,37 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
        if (!data)
                return 0x00;
 
-       if (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) {
-               if (bredr_sc_enabled(hdev)) {
-                       /* When Secure Connections is enabled, then just
-                        * return the present value stored with the OOB
-                        * data. The stored value contains the right present
-                        * information. However it can only be trusted when
-                        * not in Secure Connection Only mode.
-                        */
-                       if (!test_bit(HCI_SC_ONLY, &hdev->dev_flags))
-                               return data->present;
-
-                       /* When Secure Connections Only mode is enabled, then
-                        * the P-256 values are required. If they are not
-                        * available, then do not declare that OOB data is
-                        * present.
-                        */
-                       if (!memcmp(data->rand256, ZERO_KEY, 16) ||
-                           !memcmp(data->hash256, ZERO_KEY, 16))
-                               return 0x00;
-
-                       return 0x02;
-               }
+       if (bredr_sc_enabled(hdev)) {
+               /* When Secure Connections is enabled, then just
+                * return the present value stored with the OOB
+                * data. The stored value contains the right present
+                * information. However it can only be trusted when
+                * not in Secure Connection Only mode.
+                */
+               if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
+                       return data->present;
 
-               /* When Secure Connections is not enabled or actually
-                * not supported by the hardware, then check that if
-                * P-192 data values are present.
+               /* When Secure Connections Only mode is enabled, then
+                * the P-256 values are required. If they are not
+                * available, then do not declare that OOB data is
+                * present.
                 */
-               if (!memcmp(data->rand192, ZERO_KEY, 16) ||
-                   !memcmp(data->hash192, ZERO_KEY, 16))
+               if (!memcmp(data->rand256, ZERO_KEY, 16) ||
+                   !memcmp(data->hash256, ZERO_KEY, 16))
                        return 0x00;
 
-               return 0x01;
+               return 0x02;
        }
 
-       return 0x00;
+       /* When Secure Connections is not enabled or actually
+        * not supported by the hardware, then check that if
+        * P-192 data values are present.
+        */
+       if (!memcmp(data->rand192, ZERO_KEY, 16) ||
+           !memcmp(data->hash192, ZERO_KEY, 16))
+               return 0x00;
+
+       return 0x01;
 }
 
 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3942,13 +3956,13 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_conn_hold(conn);
 
-       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_MGMT))
                goto unlock;
 
        /* Allow pairing if we're pairable, the initiators of the
         * pairing or if the remote is not requesting bonding.
         */
-       if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
+       if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
            test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
            (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
                struct hci_cp_io_capability_reply cp;
@@ -3974,7 +3988,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                /* If we're not bondable, force one of the non-bondable
                 * authentication requirement values.
                 */
-               if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
+               if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
                        conn->auth_type &= HCI_AT_NO_BONDING_MITM;
 
                cp.authentication = conn->auth_type;
@@ -4011,8 +4025,6 @@ static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        conn->remote_cap = ev->capability;
        conn->remote_auth = ev->authentication;
-       if (ev->oob_data)
-               set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
 
 unlock:
        hci_dev_unlock(hdev);
@@ -4029,7 +4041,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
 
        hci_dev_lock(hdev);
 
-       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_MGMT))
                goto unlock;
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
@@ -4100,7 +4112,7 @@ static void hci_user_passkey_request_evt(struct hci_dev *hdev,
 
        BT_DBG("%s", hdev->name);
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
 }
 
@@ -4119,7 +4131,7 @@ static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
        conn->passkey_notify = __le32_to_cpu(ev->passkey);
        conn->passkey_entered = 0;
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
                                         conn->dst_type, conn->passkey_notify,
                                         conn->passkey_entered);
@@ -4157,7 +4169,7 @@ static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
                return;
        }
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
                                         conn->dst_type, conn->passkey_notify,
                                         conn->passkey_entered);
@@ -4226,7 +4238,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
 
        hci_dev_lock(hdev);
 
-       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_MGMT))
                goto unlock;
 
        data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
@@ -4243,7 +4255,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
                struct hci_cp_remote_oob_ext_data_reply cp;
 
                bacpy(&cp.bdaddr, &ev->bdaddr);
-               if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
+               if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
                        memset(cp.hash192, 0, sizeof(cp.hash192));
                        memset(cp.rand192, 0, sizeof(cp.rand192));
                } else {
@@ -4409,7 +4421,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        /* All controllers implicitly stop advertising in the event of a
         * connection, so ensure that the state bit is cleared.
         */
-       clear_bit(HCI_LE_ADV, &hdev->dev_flags);
+       hci_dev_clear_flag(hdev, HCI_LE_ADV);
 
        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (!conn) {
@@ -4432,7 +4444,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                if (conn->out) {
                        conn->resp_addr_type = ev->bdaddr_type;
                        bacpy(&conn->resp_addr, &ev->bdaddr);
-                       if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
+                       if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
                                conn->init_addr_type = ADDR_LE_DEV_RANDOM;
                                bacpy(&conn->init_addr, &hdev->rpa);
                        } else {
@@ -4512,7 +4524,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        hci_debugfs_create_conn(conn);
        hci_conn_add_sysfs(conn);
 
-       hci_proto_connect_cfm(conn, ev->status);
+       hci_connect_cfm(conn, ev->status);
 
        params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
                                           conn->dst_type);
@@ -4658,7 +4670,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
                /* If the controller is not using resolvable random
                 * addresses, then this report can be ignored.
                 */
-               if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
+               if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                        return;
 
                /* If the local IRK of the controller does not match
@@ -5020,32 +5032,79 @@ static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
        amp_read_loc_assoc_final_data(hdev, hcon);
 }
 
-void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
+                                u8 event, struct sk_buff *skb)
 {
-       struct hci_event_hdr *hdr = (void *) skb->data;
-       __u8 event = hdr->evt;
+       struct hci_ev_cmd_complete *ev;
+       struct hci_event_hdr *hdr;
 
-       hci_dev_lock(hdev);
+       if (!skb)
+               return false;
 
-       /* Received events are (currently) only needed when a request is
-        * ongoing so avoid unnecessary memory allocation.
-        */
-       if (hci_req_pending(hdev)) {
-               kfree_skb(hdev->recv_evt);
-               hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
+       if (skb->len < sizeof(*hdr)) {
+               BT_ERR("Too short HCI event");
+               return false;
        }
 
-       hci_dev_unlock(hdev);
-
+       hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);
 
+       if (event) {
+               if (hdr->evt != event)
+                       return false;
+               return true;
+       }
+
+       if (hdr->evt != HCI_EV_CMD_COMPLETE) {
+               BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
+               return false;
+       }
+
+       if (skb->len < sizeof(*ev)) {
+               BT_ERR("Too short cmd_complete event");
+               return false;
+       }
+
+       ev = (void *) skb->data;
+       skb_pull(skb, sizeof(*ev));
+
+       if (opcode != __le16_to_cpu(ev->opcode)) {
+               BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
+                      __le16_to_cpu(ev->opcode));
+               return false;
+       }
+
+       return true;
+}
+
+void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_event_hdr *hdr = (void *) skb->data;
+       hci_req_complete_t req_complete = NULL;
+       hci_req_complete_skb_t req_complete_skb = NULL;
+       struct sk_buff *orig_skb = NULL;
+       u8 status = 0, event = hdr->evt, req_evt = 0;
+       u16 opcode = HCI_OP_NOP;
+
        if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
                struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
-               u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
-
-               hci_req_cmd_complete(hdev, opcode, 0);
+               opcode = __le16_to_cpu(cmd_hdr->opcode);
+               hci_req_cmd_complete(hdev, opcode, status, &req_complete,
+                                    &req_complete_skb);
+               req_evt = event;
        }
 
+       /* If it looks like we might end up having to call
+        * req_complete_skb, store a pristine copy of the skb since the
+        * various handlers may modify the original one through
+        * skb_pull() calls, etc.
+        */
+       if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
+           event == HCI_EV_CMD_COMPLETE)
+               orig_skb = skb_clone(skb, GFP_KERNEL);
+
+       skb_pull(skb, HCI_EVENT_HDR_SIZE);
+
        switch (event) {
        case HCI_EV_INQUIRY_COMPLETE:
                hci_inquiry_complete_evt(hdev, skb);
@@ -5088,11 +5147,13 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                break;
 
        case HCI_EV_CMD_COMPLETE:
-               hci_cmd_complete_evt(hdev, skb);
+               hci_cmd_complete_evt(hdev, skb, &opcode, &status,
+                                    &req_complete, &req_complete_skb);
                break;
 
        case HCI_EV_CMD_STATUS:
-               hci_cmd_status_evt(hdev, skb);
+               hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
+                                  &req_complete_skb);
                break;
 
        case HCI_EV_HARDWARE_ERROR:
@@ -5224,6 +5285,17 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                break;
        }
 
+       if (req_complete) {
+               req_complete(hdev, status, opcode);
+       } else if (req_complete_skb) {
+               if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
+                       kfree_skb(orig_skb);
+                       orig_skb = NULL;
+               }
+               req_complete_skb(hdev, status, opcode, orig_skb);
+       }
+
+       kfree_skb(orig_skb);
        kfree_skb(skb);
        hdev->stat.evt_rx++;
 }
index b59f92c6df0cf7cfa1b4c770cb2e880079ae0c84..d6025d6e6d59f957c612a1e7ff455f770734eb6a 100644 (file)
@@ -34,7 +34,8 @@ void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
        req->err = 0;
 }
 
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
+static int req_run(struct hci_request *req, hci_req_complete_t complete,
+                  hci_req_complete_skb_t complete_skb)
 {
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
@@ -56,6 +57,7 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
 
        skb = skb_peek_tail(&req->cmd_q);
        bt_cb(skb)->req.complete = complete;
+       bt_cb(skb)->req.complete_skb = complete_skb;
 
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
@@ -66,6 +68,16 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
        return 0;
 }
 
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
+{
+       return req_run(req, complete, NULL);
+}
+
+int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
+{
+       return req_run(req, NULL, complete);
+}
+
 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
 {
@@ -270,7 +282,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
-       if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
+       if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;
 
@@ -304,10 +316,10 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
-       if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
+       if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
                BT_DBG("Deferring random address update");
-               set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }
 
@@ -324,12 +336,12 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
         * current RPA has expired or there is something else than
         * the current RPA in use, then generate a new one.
         */
-       if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
                int to;
 
                *own_addr_type = ADDR_LE_DEV_RANDOM;
 
-               if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
+               if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;
 
@@ -383,9 +395,9 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
-       if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
+       if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
-           (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+           (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
@@ -425,7 +437,7 @@ void __hci_update_page_scan(struct hci_request *req)
        struct hci_dev *hdev = req->hdev;
        u8 scan;
 
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;
 
        if (!hdev_is_powered(hdev))
@@ -434,7 +446,7 @@ void __hci_update_page_scan(struct hci_request *req)
        if (mgmt_powering_down(hdev))
                return;
 
-       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
+       if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
@@ -443,7 +455,7 @@ void __hci_update_page_scan(struct hci_request *req)
        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
                return;
 
-       if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;
 
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
@@ -471,14 +483,14 @@ void __hci_update_background_scan(struct hci_request *req)
 
        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
-           test_bit(HCI_SETUP, &hdev->dev_flags) ||
-           test_bit(HCI_CONFIG, &hdev->dev_flags) ||
-           test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
-           test_bit(HCI_UNREGISTER, &hdev->dev_flags))
+           hci_dev_test_flag(hdev, HCI_SETUP) ||
+           hci_dev_test_flag(hdev, HCI_CONFIG) ||
+           hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
+           hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;
 
        /* No point in doing scanning if LE support hasn't been enabled */
-       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;
 
        /* If discovery is active don't interfere with it */
@@ -502,7 +514,7 @@ void __hci_update_background_scan(struct hci_request *req)
                 */
 
                /* If controller is not scanning we are done. */
-               if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+               if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;
 
                hci_req_add_le_scan_disable(req);
@@ -524,7 +536,7 @@ void __hci_update_background_scan(struct hci_request *req)
                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
-               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+               if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);
 
                hci_req_add_le_passive_scan(req);
index adf074d33544083ca74f9dd3f22123a84bd8b527..bf6df92f42dbf44be59134349f8e62ca953d9214 100644 (file)
@@ -32,11 +32,14 @@ struct hci_request {
 
 void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
 int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
+int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param);
 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event);
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+                         hci_req_complete_t *req_complete,
+                         hci_req_complete_skb_t *req_complete_skb);
 
 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param);
index 1d65c5be7c823a282bc74a4d9691db53eb37b8d3..56f9edbf3d05dc6a2c6ba4f42174b2314d5e920d 100644 (file)
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/hci_mon.h>
+#include <net/bluetooth/mgmt.h>
+
+#include "mgmt_util.h"
+
+static LIST_HEAD(mgmt_chan_list);
+static DEFINE_MUTEX(mgmt_chan_list_lock);
 
 static atomic_t monitor_promisc = ATOMIC_INIT(0);
 
@@ -44,11 +50,32 @@ struct hci_pinfo {
        struct hci_filter filter;
        __u32             cmsg_mask;
        unsigned short    channel;
+       unsigned long     flags;
 };
 
-static inline int hci_test_bit(int nr, void *addr)
+void hci_sock_set_flag(struct sock *sk, int nr)
+{
+       set_bit(nr, &hci_pi(sk)->flags);
+}
+
+void hci_sock_clear_flag(struct sock *sk, int nr)
 {
-       return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
+       clear_bit(nr, &hci_pi(sk)->flags);
+}
+
+int hci_sock_test_flag(struct sock *sk, int nr)
+{
+       return test_bit(nr, &hci_pi(sk)->flags);
+}
+
+unsigned short hci_sock_get_channel(struct sock *sk)
+{
+       return hci_pi(sk)->channel;
+}
+
+static inline int hci_test_bit(int nr, const void *addr)
+{
+       return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 }
 
 /* Security filter */
@@ -183,54 +210,31 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
        kfree_skb(skb_copy);
 }
 
-/* Send frame to control socket */
-void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
+/* Send frame to sockets with specific channel */
+void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
+                        int flag, struct sock *skip_sk)
 {
        struct sock *sk;
 
-       BT_DBG("len %d", skb->len);
+       BT_DBG("channel %u len %d", channel, skb->len);
 
        read_lock(&hci_sk_list.lock);
 
        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;
 
-               /* Skip the original socket */
-               if (sk == skip_sk)
-                       continue;
-
-               if (sk->sk_state != BT_BOUND)
-                       continue;
-
-               if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
+               /* Ignore socket without the flag set */
+               if (!hci_sock_test_flag(sk, flag))
                        continue;
 
-               nskb = skb_clone(skb, GFP_ATOMIC);
-               if (!nskb)
+               /* Skip the original socket */
+               if (sk == skip_sk)
                        continue;
 
-               if (sock_queue_rcv_skb(sk, nskb))
-                       kfree_skb(nskb);
-       }
-
-       read_unlock(&hci_sk_list.lock);
-}
-
-static void queue_monitor_skb(struct sk_buff *skb)
-{
-       struct sock *sk;
-
-       BT_DBG("len %d", skb->len);
-
-       read_lock(&hci_sk_list.lock);
-
-       sk_for_each(sk, &hci_sk_list.head) {
-               struct sk_buff *nskb;
-
                if (sk->sk_state != BT_BOUND)
                        continue;
 
-               if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+               if (hci_pi(sk)->channel != channel)
                        continue;
 
                nskb = skb_clone(skb, GFP_ATOMIC);
@@ -290,7 +294,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len);
 
-       queue_monitor_skb(skb_copy);
+       hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
+                           HCI_SOCK_TRUSTED, NULL);
        kfree_skb(skb_copy);
 }
 
@@ -397,7 +402,8 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
 
                skb = create_monitor_event(hdev, event);
                if (skb) {
-                       queue_monitor_skb(skb);
+                       hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+                                           HCI_SOCK_TRUSTED, NULL);
                        kfree_skb(skb);
                }
        }
@@ -428,6 +434,56 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
        }
 }
 
+static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
+{
+       struct hci_mgmt_chan *c;
+
+       list_for_each_entry(c, &mgmt_chan_list, list) {
+               if (c->channel == channel)
+                       return c;
+       }
+
+       return NULL;
+}
+
+static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
+{
+       struct hci_mgmt_chan *c;
+
+       mutex_lock(&mgmt_chan_list_lock);
+       c = __hci_mgmt_chan_find(channel);
+       mutex_unlock(&mgmt_chan_list_lock);
+
+       return c;
+}
+
+int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
+{
+       if (c->channel < HCI_CHANNEL_CONTROL)
+               return -EINVAL;
+
+       mutex_lock(&mgmt_chan_list_lock);
+       if (__hci_mgmt_chan_find(c->channel)) {
+               mutex_unlock(&mgmt_chan_list_lock);
+               return -EALREADY;
+       }
+
+       list_add_tail(&c->list, &mgmt_chan_list);
+
+       mutex_unlock(&mgmt_chan_list_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(hci_mgmt_chan_register);
+
+void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
+{
+       mutex_lock(&mgmt_chan_list_lock);
+       list_del(&c->list);
+       mutex_unlock(&mgmt_chan_list_lock);
+}
+EXPORT_SYMBOL(hci_mgmt_chan_unregister);
+
 static int hci_sock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -448,7 +504,7 @@ static int hci_sock_release(struct socket *sock)
        if (hdev) {
                if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        mgmt_index_added(hdev);
-                       clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+                       hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
                        hci_dev_close(hdev->id);
                }
 
@@ -508,10 +564,10 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
        if (!hdev)
                return -EBADFD;
 
-       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;
 
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                return -EOPNOTSUPP;
 
        if (hdev->dev_type != HCI_BREDR)
@@ -687,14 +743,14 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 
                if (test_bit(HCI_UP, &hdev->flags) ||
                    test_bit(HCI_INIT, &hdev->flags) ||
-                   test_bit(HCI_SETUP, &hdev->dev_flags) ||
-                   test_bit(HCI_CONFIG, &hdev->dev_flags)) {
+                   hci_dev_test_flag(hdev, HCI_SETUP) ||
+                   hci_dev_test_flag(hdev, HCI_CONFIG)) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
                }
 
-               if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
                        err = -EUSERS;
                        hci_dev_put(hdev);
                        goto done;
@@ -704,7 +760,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 
                err = hci_dev_open(hdev->id);
                if (err) {
-                       clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+                       hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
                        mgmt_index_added(hdev);
                        hci_dev_put(hdev);
                        goto done;
@@ -715,38 +771,62 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                hci_pi(sk)->hdev = hdev;
                break;
 
-       case HCI_CHANNEL_CONTROL:
+       case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }
 
-               if (!capable(CAP_NET_ADMIN)) {
+               if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }
 
+               /* The monitor interface is restricted to CAP_NET_RAW
+                * capabilities and with that implicitly trusted.
+                */
+               hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+
+               send_monitor_replay(sk);
+
+               atomic_inc(&monitor_promisc);
                break;
 
-       case HCI_CHANNEL_MONITOR:
-               if (haddr.hci_dev != HCI_DEV_NONE) {
+       default:
+               if (!hci_mgmt_chan_find(haddr.hci_channel)) {
                        err = -EINVAL;
                        goto done;
                }
 
-               if (!capable(CAP_NET_RAW)) {
-                       err = -EPERM;
+               if (haddr.hci_dev != HCI_DEV_NONE) {
+                       err = -EINVAL;
                        goto done;
                }
 
-               send_monitor_replay(sk);
-
-               atomic_inc(&monitor_promisc);
+               /* Users with CAP_NET_ADMIN capabilities are allowed
+                * access to all management commands and events. For
+                * untrusted users the interface is restricted and
+                * also only untrusted events are sent.
+                */
+               if (capable(CAP_NET_ADMIN))
+                       hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+
+               /* At the moment the index and unconfigured index events
+                * are enabled unconditionally. Setting them on each
+                * socket when binding keeps this functionality. They
+                * however might be cleared later and then sending of these
+                * events will be disabled, but that is then intentional.
+                *
+                * This also enables generic events that are safe to be
+                * received by untrusted users. Example for such events
+                * are changes to settings, class of device, name etc.
+                */
+               if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
+                       hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
+                       hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
+                       hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
+               }
                break;
-
-       default:
-               err = -EINVAL;
-               goto done;
        }
 
 
@@ -826,8 +906,8 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
        }
 }
 
-static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *msg, size_t len, int flags)
+static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                           int flags)
 {
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
@@ -860,10 +940,13 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                hci_sock_cmsg(sk, msg, skb);
                break;
        case HCI_CHANNEL_USER:
-       case HCI_CHANNEL_CONTROL:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
                break;
+       default:
+               if (hci_mgmt_chan_find(hci_pi(sk)->channel))
+                       sock_recv_timestamp(msg, sk, skb);
+               break;
        }
 
        skb_free_datagram(sk, skb);
@@ -871,10 +954,122 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        return err ? : copied;
 }
 
-static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *msg, size_t len)
+static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
+                       struct msghdr *msg, size_t msglen)
+{
+       void *buf;
+       u8 *cp;
+       struct mgmt_hdr *hdr;
+       u16 opcode, index, len;
+       struct hci_dev *hdev = NULL;
+       const struct hci_mgmt_handler *handler;
+       bool var_len, no_hdev;
+       int err;
+
+       BT_DBG("got %zu bytes", msglen);
+
+       if (msglen < sizeof(*hdr))
+               return -EINVAL;
+
+       buf = kmalloc(msglen, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       if (memcpy_from_msg(buf, msg, msglen)) {
+               err = -EFAULT;
+               goto done;
+       }
+
+       hdr = buf;
+       opcode = __le16_to_cpu(hdr->opcode);
+       index = __le16_to_cpu(hdr->index);
+       len = __le16_to_cpu(hdr->len);
+
+       if (len != msglen - sizeof(*hdr)) {
+               err = -EINVAL;
+               goto done;
+       }
+
+       if (opcode >= chan->handler_count ||
+           chan->handlers[opcode].func == NULL) {
+               BT_DBG("Unknown op %u", opcode);
+               err = mgmt_cmd_status(sk, index, opcode,
+                                     MGMT_STATUS_UNKNOWN_COMMAND);
+               goto done;
+       }
+
+       handler = &chan->handlers[opcode];
+
+       if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
+           !(handler->flags & HCI_MGMT_UNTRUSTED)) {
+               err = mgmt_cmd_status(sk, index, opcode,
+                                     MGMT_STATUS_PERMISSION_DENIED);
+               goto done;
+       }
+
+       if (index != MGMT_INDEX_NONE) {
+               hdev = hci_dev_get(index);
+               if (!hdev) {
+                       err = mgmt_cmd_status(sk, index, opcode,
+                                             MGMT_STATUS_INVALID_INDEX);
+                       goto done;
+               }
+
+               if (hci_dev_test_flag(hdev, HCI_SETUP) ||
+                   hci_dev_test_flag(hdev, HCI_CONFIG) ||
+                   hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+                       err = mgmt_cmd_status(sk, index, opcode,
+                                             MGMT_STATUS_INVALID_INDEX);
+                       goto done;
+               }
+
+               if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+                   !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
+                       err = mgmt_cmd_status(sk, index, opcode,
+                                             MGMT_STATUS_INVALID_INDEX);
+                       goto done;
+               }
+       }
+
+       no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
+       if (no_hdev != !hdev) {
+               err = mgmt_cmd_status(sk, index, opcode,
+                                     MGMT_STATUS_INVALID_INDEX);
+               goto done;
+       }
+
+       var_len = (handler->flags & HCI_MGMT_VAR_LEN);
+       if ((var_len && len < handler->data_len) ||
+           (!var_len && len != handler->data_len)) {
+               err = mgmt_cmd_status(sk, index, opcode,
+                                     MGMT_STATUS_INVALID_PARAMS);
+               goto done;
+       }
+
+       if (hdev && chan->hdev_init)
+               chan->hdev_init(sk, hdev);
+
+       cp = buf + sizeof(*hdr);
+
+       err = handler->func(sk, hdev, cp, len);
+       if (err < 0)
+               goto done;
+
+       err = msglen;
+
+done:
+       if (hdev)
+               hci_dev_put(hdev);
+
+       kfree(buf);
+       return err;
+}
+
+static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+                           size_t len)
 {
        struct sock *sk = sock->sk;
+       struct hci_mgmt_chan *chan;
        struct hci_dev *hdev;
        struct sk_buff *skb;
        int err;
@@ -896,14 +1091,18 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        case HCI_CHANNEL_RAW:
        case HCI_CHANNEL_USER:
                break;
-       case HCI_CHANNEL_CONTROL:
-               err = mgmt_control(sk, msg, len);
-               goto done;
        case HCI_CHANNEL_MONITOR:
                err = -EOPNOTSUPP;
                goto done;
        default:
-               err = -EINVAL;
+               mutex_lock(&mgmt_chan_list_lock);
+               chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
+               if (chan)
+                       err = hci_mgmt_cmd(chan, sk, msg, len);
+               else
+                       err = -EINVAL;
+
+               mutex_unlock(&mgmt_chan_list_lock);
                goto done;
        }
 
index 07348e142f16a783b7764d72314830f4b7844330..a05b9dbf14c991dd1a90de4f23877bc02b78e1b5 100644 (file)
@@ -70,10 +70,11 @@ static void hidp_session_terminate(struct hidp_session *s);
 
 static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
 {
+       u32 valid_flags = 0;
        memset(ci, 0, sizeof(*ci));
        bacpy(&ci->bdaddr, &session->bdaddr);
 
-       ci->flags = session->flags;
+       ci->flags = session->flags & valid_flags;
        ci->state = BT_CONNECTED;
 
        if (session->input) {
@@ -907,7 +908,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
        kref_init(&session->ref);
        atomic_set(&session->state, HIDP_SESSION_IDLING);
        init_waitqueue_head(&session->state_queue);
-       session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
+       session->flags = req->flags & BIT(HIDP_BLUETOOTH_VENDOR_ID);
 
        /* connection management */
        bacpy(&session->bdaddr, bdaddr);
@@ -1312,6 +1313,7 @@ int hidp_connection_add(struct hidp_connadd_req *req,
                        struct socket *ctrl_sock,
                        struct socket *intr_sock)
 {
+       u32 valid_flags = 0;
        struct hidp_session *session;
        struct l2cap_conn *conn;
        struct l2cap_chan *chan;
@@ -1321,6 +1323,9 @@ int hidp_connection_add(struct hidp_connadd_req *req,
        if (ret)
                return ret;
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        chan = l2cap_pi(ctrl_sock->sk)->chan;
        conn = NULL;
        l2cap_chan_lock(chan);
@@ -1351,13 +1356,17 @@ out_conn:
 
 int hidp_connection_del(struct hidp_conndel_req *req)
 {
+       u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG);
        struct hidp_session *session;
 
+       if (req->flags & ~valid_flags)
+               return -EINVAL;
+
        session = hidp_session_find(&req->bdaddr);
        if (!session)
                return -ENOENT;
 
-       if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG))
+       if (req->flags & BIT(HIDP_VIRTUAL_CABLE_UNPLUG))
                hidp_send_ctrl_message(session,
                                       HIDP_TRANS_HID_CONTROL |
                                         HIDP_CTRL_VIRTUAL_CABLE_UNPLUG,
index 6ba33f9631e8e5830374ab4e51720c493969c67c..dad419782a1280bbff079c4978578f0be54e37a3 100644 (file)
@@ -292,7 +292,7 @@ static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
        struct sk_buff *skb;
 
        skb_queue_walk(head, skb) {
-               if (bt_cb(skb)->control.txseq == seq)
+               if (bt_cb(skb)->l2cap.txseq == seq)
                        return skb;
        }
 
@@ -954,11 +954,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
 {
        if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
                __unpack_extended_control(get_unaligned_le32(skb->data),
-                                         &bt_cb(skb)->control);
+                                         &bt_cb(skb)->l2cap);
                skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
        } else {
                __unpack_enhanced_control(get_unaligned_le16(skb->data),
-                                         &bt_cb(skb)->control);
+                                         &bt_cb(skb)->l2cap);
                skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
        }
 }
@@ -1200,8 +1200,8 @@ static void l2cap_move_setup(struct l2cap_chan *chan)
 
        chan->retry_count = 0;
        skb_queue_walk(&chan->tx_q, skb) {
-               if (bt_cb(skb)->control.retries)
-                       bt_cb(skb)->control.retries = 1;
+               if (bt_cb(skb)->l2cap.retries)
+                       bt_cb(skb)->l2cap.retries = 1;
                else
                        break;
        }
@@ -1244,6 +1244,13 @@ static void l2cap_move_done(struct l2cap_chan *chan)
 
 static void l2cap_chan_ready(struct l2cap_chan *chan)
 {
+       /* The channel may have already been flagged as connected in
+        * case of receiving data before the L2CAP info req/rsp
+        * procedure is complete.
+        */
+       if (chan->state == BT_CONNECTED)
+               return;
+
        /* This clears all conf flags, including CONF_NOT_COMPLETE */
        chan->conf_state = 0;
        __clear_chan_timer(chan);
@@ -1839,8 +1846,8 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,
 
                skb = skb_dequeue(&chan->tx_q);
 
-               bt_cb(skb)->control.retries = 1;
-               control = &bt_cb(skb)->control;
+               bt_cb(skb)->l2cap.retries = 1;
+               control = &bt_cb(skb)->l2cap;
 
                control->reqseq = 0;
                control->txseq = chan->next_tx_seq;
@@ -1884,8 +1891,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
 
                skb = chan->tx_send_head;
 
-               bt_cb(skb)->control.retries = 1;
-               control = &bt_cb(skb)->control;
+               bt_cb(skb)->l2cap.retries = 1;
+               control = &bt_cb(skb)->l2cap;
 
                if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
                        control->final = 1;
@@ -1956,11 +1963,11 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
                        continue;
                }
 
-               bt_cb(skb)->control.retries++;
-               control = bt_cb(skb)->control;
+               bt_cb(skb)->l2cap.retries++;
+               control = bt_cb(skb)->l2cap;
 
                if (chan->max_tx != 0 &&
-                   bt_cb(skb)->control.retries > chan->max_tx) {
+                   bt_cb(skb)->l2cap.retries > chan->max_tx) {
                        BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
                        l2cap_send_disconn_req(chan, ECONNRESET);
                        l2cap_seq_list_clear(&chan->retrans_list);
@@ -2038,7 +2045,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,
 
        if (chan->unacked_frames) {
                skb_queue_walk(&chan->tx_q, skb) {
-                       if (bt_cb(skb)->control.txseq == control->reqseq ||
+                       if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
                            skb == chan->tx_send_head)
                                break;
                }
@@ -2048,7 +2055,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,
                                break;
 
                        l2cap_seq_list_append(&chan->retrans_list,
-                                             bt_cb(skb)->control.txseq);
+                                             bt_cb(skb)->l2cap.txseq);
                }
 
                l2cap_ertm_resend(chan);
@@ -2260,8 +2267,8 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
                return ERR_PTR(err);
        }
 
-       bt_cb(skb)->control.fcs = chan->fcs;
-       bt_cb(skb)->control.retries = 0;
+       bt_cb(skb)->l2cap.fcs = chan->fcs;
+       bt_cb(skb)->l2cap.retries = 0;
        return skb;
 }
 
@@ -2314,7 +2321,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
                        return PTR_ERR(skb);
                }
 
-               bt_cb(skb)->control.sar = sar;
+               bt_cb(skb)->l2cap.sar = sar;
                __skb_queue_tail(seg_queue, skb);
 
                len -= pdu_len;
@@ -2849,7 +2856,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
                        continue;
 
                /* Don't send frame to the channel it came from */
-               if (bt_cb(skb)->chan == chan)
+               if (bt_cb(skb)->l2cap.chan == chan)
                        continue;
 
                nskb = skb_clone(skb, GFP_KERNEL);
@@ -3893,7 +3900,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
                return -EPROTO;
 
        hci_dev_lock(hdev);
-       if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+       if (hci_dev_test_flag(hdev, HCI_MGMT) &&
            !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
                mgmt_device_connected(hdev, hcon, 0, NULL, 0);
        hci_dev_unlock(hdev);
@@ -5911,7 +5918,7 @@ static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
 
                skb_unlink(skb, &chan->srej_q);
                chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
-               err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
+               err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
                if (err)
                        break;
        }
@@ -5945,7 +5952,7 @@ static void l2cap_handle_srej(struct l2cap_chan *chan,
                return;
        }
 
-       if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
+       if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
                BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
                l2cap_send_disconn_req(chan, ECONNRESET);
                return;
@@ -5998,7 +6005,7 @@ static void l2cap_handle_rej(struct l2cap_chan *chan,
        skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
 
        if (chan->max_tx && skb &&
-           bt_cb(skb)->control.retries >= chan->max_tx) {
+           bt_cb(skb)->l2cap.retries >= chan->max_tx) {
                BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
                l2cap_send_disconn_req(chan, ECONNRESET);
                return;
@@ -6558,7 +6565,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
 
 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-       struct l2cap_ctrl *control = &bt_cb(skb)->control;
+       struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
        u16 len;
        u8 event;
 
@@ -6785,6 +6792,13 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
 
        BT_DBG("chan %p, len %d", chan, skb->len);
 
+       /* If we receive data on a fixed channel before the info req/rsp
+        * procdure is done simply assume that the channel is supported
+        * and mark it as ready.
+        */
+       if (chan->chan_type == L2CAP_CHAN_FIXED)
+               l2cap_chan_ready(chan);
+
        if (chan->state != BT_CONNECTED)
                goto drop;
 
@@ -6850,8 +6864,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
                goto drop;
 
        /* Store remote BD_ADDR and PSM for msg_name */
-       bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
-       bt_cb(skb)->psm = psm;
+       bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
+       bt_cb(skb)->l2cap.psm = psm;
 
        if (!chan->ops->recv(chan, skb)) {
                l2cap_chan_put(chan);
@@ -6973,12 +6987,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
        conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
 
        if (hcon->type == ACL_LINK &&
-           test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
+           hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
                conn->local_fixed_chan |= L2CAP_FC_A2MP;
 
-       if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
+       if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
            (bredr_sc_enabled(hcon->hdev) ||
-            test_bit(HCI_FORCE_BREDR_SMP, &hcon->hdev->dbg_flags)))
+            hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
                conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
 
        mutex_init(&conn->ident_lock);
@@ -7098,7 +7112,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                else
                        dst_type = ADDR_LE_DEV_RANDOM;
 
-               if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+               if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                        role = HCI_ROLE_SLAVE;
                else
                        role = HCI_ROLE_MASTER;
@@ -7238,13 +7252,16 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
        return NULL;
 }
 
-void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
 {
        struct hci_dev *hdev = hcon->hdev;
        struct l2cap_conn *conn;
        struct l2cap_chan *pchan;
        u8 dst_type;
 
+       if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
+               return;
+
        BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
 
        if (status) {
@@ -7307,8 +7324,11 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
        return conn->disc_reason;
 }
 
-void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
 {
+       if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
+               return;
+
        BT_DBG("hcon %p reason %d", hcon, reason);
 
        l2cap_conn_del(hcon, bt_to_errno(reason));
@@ -7331,13 +7351,13 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
        }
 }
 
-int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 {
        struct l2cap_conn *conn = hcon->l2cap_data;
        struct l2cap_chan *chan;
 
        if (!conn)
-               return 0;
+               return;
 
        BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
 
@@ -7420,8 +7440,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
        }
 
        mutex_unlock(&conn->chan_lock);
-
-       return 0;
 }
 
 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
@@ -7529,6 +7547,13 @@ drop:
        return 0;
 }
 
+static struct hci_cb l2cap_cb = {
+       .name           = "L2CAP",
+       .connect_cfm    = l2cap_connect_cfm,
+       .disconn_cfm    = l2cap_disconn_cfm,
+       .security_cfm   = l2cap_security_cfm,
+};
+
 static int l2cap_debugfs_show(struct seq_file *f, void *p)
 {
        struct l2cap_chan *c;
@@ -7570,6 +7595,8 @@ int __init l2cap_init(void)
        if (err < 0)
                return err;
 
+       hci_register_cb(&l2cap_cb);
+
        if (IS_ERR_OR_NULL(bt_debugfs))
                return 0;
 
@@ -7587,6 +7614,7 @@ int __init l2cap_init(void)
 void l2cap_exit(void)
 {
        debugfs_remove(l2cap_debugfs);
+       hci_unregister_cb(&l2cap_cb);
        l2cap_cleanup_sockets();
 }
 
index 60694f0f4c73768dee1db1a4926ced43522dbe83..a7278f05eafbbda65da7c991820474a95e4e1a3c 100644 (file)
@@ -944,8 +944,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
        return err;
 }
 
-static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-                             struct msghdr *msg, size_t len)
+static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+                             size_t len)
 {
        struct sock *sk = sock->sk;
        struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -976,8 +976,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        return err;
 }
 
-static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-                             struct msghdr *msg, size_t len, int flags)
+static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+                             size_t len, int flags)
 {
        struct sock *sk = sock->sk;
        struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1004,9 +1004,9 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        release_sock(sk);
 
        if (sock->type == SOCK_STREAM)
-               err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+               err = bt_sock_stream_recvmsg(sock, msg, len, flags);
        else
-               err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
+               err = bt_sock_recvmsg(sock, msg, len, flags);
 
        if (pi->chan->mode != L2CAP_MODE_ERTM)
                return err;
@@ -1330,7 +1330,7 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
 
        skb->priority = sk->sk_priority;
 
-       bt_cb(skb)->chan = chan;
+       bt_cb(skb)->l2cap.chan = chan;
 
        return skb;
 }
@@ -1444,8 +1444,8 @@ static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name,
 
        memset(la, 0, sizeof(struct sockaddr_l2));
        la->l2_family = AF_BLUETOOTH;
-       la->l2_psm = bt_cb(skb)->psm;
-       bacpy(&la->l2_bdaddr, &bt_cb(skb)->bdaddr);
+       la->l2_psm = bt_cb(skb)->l2cap.psm;
+       bacpy(&la->l2_bdaddr, &bt_cb(skb)->l2cap.bdaddr);
 
        *msg_namelen = sizeof(struct sockaddr_l2);
 }
index 9ec5390c85eba61c3c3bcb5a813c8d8af326cf33..845dfcc43a20e79b12904eae88399d8230b1e564 100644 (file)
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_sock.h>
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/mgmt.h>
 
 #include "hci_request.h"
 #include "smp.h"
+#include "mgmt_util.h"
 
 #define MGMT_VERSION   1
-#define MGMT_REVISION  8
+#define MGMT_REVISION  9
 
 static const u16 mgmt_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
@@ -95,6 +97,11 @@ static const u16 mgmt_commands[] = {
        MGMT_OP_SET_EXTERNAL_CONFIG,
        MGMT_OP_SET_PUBLIC_ADDRESS,
        MGMT_OP_START_SERVICE_DISCOVERY,
+       MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
+       MGMT_OP_READ_EXT_INDEX_LIST,
+       MGMT_OP_READ_ADV_FEATURES,
+       MGMT_OP_ADD_ADVERTISING,
+       MGMT_OP_REMOVE_ADVERTISING,
 };
 
 static const u16 mgmt_events[] = {
@@ -127,6 +134,32 @@ static const u16 mgmt_events[] = {
        MGMT_EV_UNCONF_INDEX_ADDED,
        MGMT_EV_UNCONF_INDEX_REMOVED,
        MGMT_EV_NEW_CONFIG_OPTIONS,
+       MGMT_EV_EXT_INDEX_ADDED,
+       MGMT_EV_EXT_INDEX_REMOVED,
+       MGMT_EV_LOCAL_OOB_DATA_UPDATED,
+       MGMT_EV_ADVERTISING_ADDED,
+       MGMT_EV_ADVERTISING_REMOVED,
+};
+
+static const u16 mgmt_untrusted_commands[] = {
+       MGMT_OP_READ_INDEX_LIST,
+       MGMT_OP_READ_INFO,
+       MGMT_OP_READ_UNCONF_INDEX_LIST,
+       MGMT_OP_READ_CONFIG_INFO,
+       MGMT_OP_READ_EXT_INDEX_LIST,
+};
+
+static const u16 mgmt_untrusted_events[] = {
+       MGMT_EV_INDEX_ADDED,
+       MGMT_EV_INDEX_REMOVED,
+       MGMT_EV_NEW_SETTINGS,
+       MGMT_EV_CLASS_OF_DEV_CHANGED,
+       MGMT_EV_LOCAL_NAME_CHANGED,
+       MGMT_EV_UNCONF_INDEX_ADDED,
+       MGMT_EV_UNCONF_INDEX_REMOVED,
+       MGMT_EV_NEW_CONFIG_OPTIONS,
+       MGMT_EV_EXT_INDEX_ADDED,
+       MGMT_EV_EXT_INDEX_REMOVED,
 };
 
 #define CACHE_TIMEOUT  msecs_to_jiffies(2 * 1000)
@@ -134,17 +167,6 @@ static const u16 mgmt_events[] = {
 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
                 "\x00\x00\x00\x00\x00\x00\x00\x00"
 
-struct pending_cmd {
-       struct list_head list;
-       u16 opcode;
-       int index;
-       void *param;
-       size_t param_len;
-       struct sock *sk;
-       void *user_data;
-       int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
-};
-
 /* HCI to MGMT error code conversion table */
 static u8 mgmt_status_table[] = {
        MGMT_STATUS_SUCCESS,
@@ -218,98 +240,32 @@ static u8 mgmt_status(u8 hci_status)
        return MGMT_STATUS_FAILED;
 }
 
-static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
-                     struct sock *skip_sk)
+static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
+                           u16 len, int flag)
 {
-       struct sk_buff *skb;
-       struct mgmt_hdr *hdr;
-
-       skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
-       if (!skb)
-               return -ENOMEM;
-
-       hdr = (void *) skb_put(skb, sizeof(*hdr));
-       hdr->opcode = cpu_to_le16(event);
-       if (hdev)
-               hdr->index = cpu_to_le16(hdev->id);
-       else
-               hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
-       hdr->len = cpu_to_le16(data_len);
-
-       if (data)
-               memcpy(skb_put(skb, data_len), data, data_len);
-
-       /* Time stamp */
-       __net_timestamp(skb);
-
-       hci_send_to_control(skb, skip_sk);
-       kfree_skb(skb);
-
-       return 0;
+       return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
+                              flag, NULL);
 }
 
-static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
+static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
+                             u16 len, int flag, struct sock *skip_sk)
 {
-       struct sk_buff *skb;
-       struct mgmt_hdr *hdr;
-       struct mgmt_ev_cmd_status *ev;
-       int err;
-
-       BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
-
-       skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
-       if (!skb)
-               return -ENOMEM;
-
-       hdr = (void *) skb_put(skb, sizeof(*hdr));
-
-       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
-       hdr->index = cpu_to_le16(index);
-       hdr->len = cpu_to_le16(sizeof(*ev));
-
-       ev = (void *) skb_put(skb, sizeof(*ev));
-       ev->status = status;
-       ev->opcode = cpu_to_le16(cmd);
-
-       err = sock_queue_rcv_skb(sk, skb);
-       if (err < 0)
-               kfree_skb(skb);
-
-       return err;
+       return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
+                              flag, skip_sk);
 }
 
-static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
-                       void *rp, size_t rp_len)
+static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
+                             u16 len, struct sock *skip_sk)
 {
-       struct sk_buff *skb;
-       struct mgmt_hdr *hdr;
-       struct mgmt_ev_cmd_complete *ev;
-       int err;
-
-       BT_DBG("sock %p", sk);
-
-       skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
-       if (!skb)
-               return -ENOMEM;
-
-       hdr = (void *) skb_put(skb, sizeof(*hdr));
-
-       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
-       hdr->index = cpu_to_le16(index);
-       hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
-
-       ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
-       ev->opcode = cpu_to_le16(cmd);
-       ev->status = status;
-
-       if (rp)
-               memcpy(ev->data, rp, rp_len);
-
-       err = sock_queue_rcv_skb(sk, skb);
-       if (err < 0)
-               kfree_skb(skb);
+       return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
+                              HCI_MGMT_GENERIC_EVENTS, skip_sk);
+}
 
-       return err;
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
+                     struct sock *skip_sk)
+{
+       return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
+                              HCI_SOCK_TRUSTED, skip_sk);
 }
 
 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -322,22 +278,28 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
        rp.version = MGMT_VERSION;
        rp.revision = cpu_to_le16(MGMT_REVISION);
 
-       return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
-                           sizeof(rp));
+       return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
+                                &rp, sizeof(rp));
 }
 
 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
                         u16 data_len)
 {
        struct mgmt_rp_read_commands *rp;
-       const u16 num_commands = ARRAY_SIZE(mgmt_commands);
-       const u16 num_events = ARRAY_SIZE(mgmt_events);
-       __le16 *opcode;
+       u16 num_commands, num_events;
        size_t rp_size;
        int i, err;
 
        BT_DBG("sock %p", sk);
 
+       if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
+               num_commands = ARRAY_SIZE(mgmt_commands);
+               num_events = ARRAY_SIZE(mgmt_events);
+       } else {
+               num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
+               num_events = ARRAY_SIZE(mgmt_untrusted_events);
+       }
+
        rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
 
        rp = kmalloc(rp_size, GFP_KERNEL);
@@ -347,14 +309,26 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
        rp->num_commands = cpu_to_le16(num_commands);
        rp->num_events = cpu_to_le16(num_events);
 
-       for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
-               put_unaligned_le16(mgmt_commands[i], opcode);
+       if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
+               __le16 *opcode = rp->opcodes;
+
+               for (i = 0; i < num_commands; i++, opcode++)
+                       put_unaligned_le16(mgmt_commands[i], opcode);
+
+               for (i = 0; i < num_events; i++, opcode++)
+                       put_unaligned_le16(mgmt_events[i], opcode);
+       } else {
+               __le16 *opcode = rp->opcodes;
+
+               for (i = 0; i < num_commands; i++, opcode++)
+                       put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
 
-       for (i = 0; i < num_events; i++, opcode++)
-               put_unaligned_le16(mgmt_events[i], opcode);
+               for (i = 0; i < num_events; i++, opcode++)
+                       put_unaligned_le16(mgmt_untrusted_events[i], opcode);
+       }
 
-       err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
-                          rp_size);
+       err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
+                               rp, rp_size);
        kfree(rp);
 
        return err;
@@ -376,7 +350,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->dev_type == HCI_BREDR &&
-                   !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
+                   !hci_dev_test_flag(d, HCI_UNCONFIGURED))
                        count++;
        }
 
@@ -389,9 +363,9 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
 
        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
-               if (test_bit(HCI_SETUP, &d->dev_flags) ||
-                   test_bit(HCI_CONFIG, &d->dev_flags) ||
-                   test_bit(HCI_USER_CHANNEL, &d->dev_flags))
+               if (hci_dev_test_flag(d, HCI_SETUP) ||
+                   hci_dev_test_flag(d, HCI_CONFIG) ||
+                   hci_dev_test_flag(d, HCI_USER_CHANNEL))
                        continue;
 
                /* Devices marked as raw-only are neither configured
@@ -401,7 +375,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                        continue;
 
                if (d->dev_type == HCI_BREDR &&
-                   !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
+                   !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
                        rp->index[count++] = cpu_to_le16(d->id);
                        BT_DBG("Added hci%u", d->id);
                }
@@ -412,8 +386,8 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
 
        read_unlock(&hci_dev_list_lock);
 
-       err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
-                          rp_len);
+       err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
+                               0, rp, rp_len);
 
        kfree(rp);
 
@@ -436,7 +410,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->dev_type == HCI_BREDR &&
-                   test_bit(HCI_UNCONFIGURED, &d->dev_flags))
+                   hci_dev_test_flag(d, HCI_UNCONFIGURED))
                        count++;
        }
 
@@ -449,9 +423,9 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
 
        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
-               if (test_bit(HCI_SETUP, &d->dev_flags) ||
-                   test_bit(HCI_CONFIG, &d->dev_flags) ||
-                   test_bit(HCI_USER_CHANNEL, &d->dev_flags))
+               if (hci_dev_test_flag(d, HCI_SETUP) ||
+                   hci_dev_test_flag(d, HCI_CONFIG) ||
+                   hci_dev_test_flag(d, HCI_USER_CHANNEL))
                        continue;
 
                /* Devices marked as raw-only are neither configured
@@ -461,7 +435,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
                        continue;
 
                if (d->dev_type == HCI_BREDR &&
-                   test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
+                   hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
                        rp->index[count++] = cpu_to_le16(d->id);
                        BT_DBG("Added hci%u", d->id);
                }
@@ -472,8 +446,84 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
 
        read_unlock(&hci_dev_list_lock);
 
-       err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
-                          0, rp, rp_len);
+       err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
+                               MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
+
+       kfree(rp);
+
+       return err;
+}
+
+static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
+                              void *data, u16 data_len)
+{
+       struct mgmt_rp_read_ext_index_list *rp;
+       struct hci_dev *d;
+       size_t rp_len;
+       u16 count;
+       int err;
+
+       BT_DBG("sock %p", sk);
+
+       read_lock(&hci_dev_list_lock);
+
+       count = 0;
+       list_for_each_entry(d, &hci_dev_list, list) {
+               if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
+                       count++;
+       }
+
+       rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
+       rp = kmalloc(rp_len, GFP_ATOMIC);
+       if (!rp) {
+               read_unlock(&hci_dev_list_lock);
+               return -ENOMEM;
+       }
+
+       count = 0;
+       list_for_each_entry(d, &hci_dev_list, list) {
+               if (hci_dev_test_flag(d, HCI_SETUP) ||
+                   hci_dev_test_flag(d, HCI_CONFIG) ||
+                   hci_dev_test_flag(d, HCI_USER_CHANNEL))
+                       continue;
+
+               /* Devices marked as raw-only are neither configured
+                * nor unconfigured controllers.
+                */
+               if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+                       continue;
+
+               if (d->dev_type == HCI_BREDR) {
+                       if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
+                               rp->entry[count].type = 0x01;
+                       else
+                               rp->entry[count].type = 0x00;
+               } else if (d->dev_type == HCI_AMP) {
+                       rp->entry[count].type = 0x02;
+               } else {
+                       continue;
+               }
+
+               rp->entry[count].bus = d->bus;
+               rp->entry[count++].index = cpu_to_le16(d->id);
+               BT_DBG("Added hci%u", d->id);
+       }
+
+       rp->num_controllers = cpu_to_le16(count);
+       rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
+
+       read_unlock(&hci_dev_list_lock);
+
+       /* If this command is called at least once, then all the
+        * default index and unconfigured index events are disabled
+        * and from now on only extended index events are used.
+        */
+       hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
+       hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
+       hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
+
+       err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
+                               MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
 
        kfree(rp);
 
@@ -483,7 +533,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
 static bool is_configured(struct hci_dev *hdev)
 {
        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
-           !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
+           !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
                return false;
 
        if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
@@ -498,7 +548,7 @@ static __le32 get_missing_options(struct hci_dev *hdev)
        u32 options = 0;
 
        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
-           !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
+           !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
                options |= MGMT_OPTION_EXTERNAL_CONFIG;
 
        if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
@@ -512,16 +562,16 @@ static int new_options(struct hci_dev *hdev, struct sock *skip)
 {
        __le32 options = get_missing_options(hdev);
 
-       return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
-                         sizeof(options), skip);
+       return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
+                                 sizeof(options), skip);
 }
 
 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
 {
        __le32 options = get_missing_options(hdev);
 
-       return cmd_complete(sk, hdev->id, opcode, 0, &options,
-                           sizeof(options));
+       return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
+                                sizeof(options));
 }
 
 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
@@ -548,8 +598,8 @@ static int read_config_info(struct sock *sk, struct hci_dev *hdev,
 
        hci_dev_unlock(hdev);
 
-       return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
-                           sizeof(rp));
+       return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
+                                &rp, sizeof(rp));
 }
 
 static u32 get_supported_settings(struct hci_dev *hdev)
@@ -582,6 +632,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
                settings |= MGMT_SETTING_ADVERTISING;
                settings |= MGMT_SETTING_SECURE_CONN;
                settings |= MGMT_SETTING_PRIVACY;
+               settings |= MGMT_SETTING_STATIC_ADDRESS;
        }
 
        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
@@ -598,45 +649,64 @@ static u32 get_current_settings(struct hci_dev *hdev)
        if (hdev_is_powered(hdev))
                settings |= MGMT_SETTING_POWERED;
 
-       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
                settings |= MGMT_SETTING_CONNECTABLE;
 
-       if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
                settings |= MGMT_SETTING_FAST_CONNECTABLE;
 
-       if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                settings |= MGMT_SETTING_DISCOVERABLE;
 
-       if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_BONDABLE))
                settings |= MGMT_SETTING_BONDABLE;
 
-       if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                settings |= MGMT_SETTING_BREDR;
 
-       if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                settings |= MGMT_SETTING_LE;
 
-       if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
                settings |= MGMT_SETTING_LINK_SECURITY;
 
-       if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                settings |= MGMT_SETTING_SSP;
 
-       if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
                settings |= MGMT_SETTING_HS;
 
-       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                settings |= MGMT_SETTING_ADVERTISING;
 
-       if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
                settings |= MGMT_SETTING_SECURE_CONN;
 
-       if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
                settings |= MGMT_SETTING_DEBUG_KEYS;
 
-       if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                settings |= MGMT_SETTING_PRIVACY;
 
+       /* The current setting for static address has two purposes. The
+        * first is to indicate if the static address will be used and
+        * the second is to indicate if it is actually set.
+        *
+        * This means if the static address is not configured, this flag
+        * will never be set. If the address is configured, then if the
+        * address is actually used decides if the flag is set or not.
+        *
+        * For single mode LE only controllers and dual-mode controllers
+        * with BR/EDR disabled, the existence of the static address will
+        * be evaluated.
+        */
+       if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
+           !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
+           !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
+               if (bacmp(&hdev->static_addr, BDADDR_ANY))
+                       settings |= MGMT_SETTING_STATIC_ADDRESS;
+       }
+
        return settings;
 }
 
@@ -750,35 +820,19 @@ static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
        return ptr;
 }
 
-static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
+static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
 {
-       struct pending_cmd *cmd;
-
-       list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
-               if (cmd->opcode == opcode)
-                       return cmd;
-       }
-
-       return NULL;
+       return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
 }
 
-static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
+static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
                                                  struct hci_dev *hdev,
                                                  const void *data)
 {
-       struct pending_cmd *cmd;
-
-       list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
-               if (cmd->user_data != data)
-                       continue;
-               if (cmd->opcode == opcode)
-                       return cmd;
-       }
-
-       return NULL;
+       return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
 }
 
-static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
 {
        u8 ad_len = 0;
        size_t name_len;
@@ -804,21 +858,36 @@ static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
        return ad_len;
 }
 
-static void update_scan_rsp_data(struct hci_request *req)
+static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+{
+       /* TODO: Set the appropriate entries based on advertising instance flags
+        * here once flags other than 0 are supported.
+        */
+       memcpy(ptr, hdev->adv_instance.scan_rsp_data,
+              hdev->adv_instance.scan_rsp_len);
+
+       return hdev->adv_instance.scan_rsp_len;
+}
+
+static void update_scan_rsp_data_for_instance(struct hci_request *req,
+                                             u8 instance)
 {
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;
 
-       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;
 
        memset(&cp, 0, sizeof(cp));
 
-       len = create_scan_rsp_data(hdev, cp.data);
+       if (instance)
+               len = create_instance_scan_rsp_data(hdev, cp.data);
+       else
+               len = create_default_scan_rsp_data(hdev, cp.data);
 
        if (hdev->scan_rsp_data_len == len &&
-           memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
+           !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;
 
        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
@@ -829,14 +898,33 @@ static void update_scan_rsp_data(struct hci_request *req)
        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
 }
 
+static void update_scan_rsp_data(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       u8 instance;
+
+       /* The "Set Advertising" setting supersedes the "Add Advertising"
+        * setting. Here we set the scan response data based on which
+        * setting was set. When neither apply, default to the global settings,
+        * represented by instance "0".
+        */
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
+           !hci_dev_test_flag(hdev, HCI_ADVERTISING))
+               instance = 0x01;
+       else
+               instance = 0x00;
+
+       update_scan_rsp_data_for_instance(req, instance);
+}
+
 static u8 get_adv_discov_flags(struct hci_dev *hdev)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        /* If there's a pending mgmt command the flags will not yet have
         * their final values, so check for this first.
         */
-       cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+       cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
        if (cmd) {
                struct mgmt_mode *cp = cmd->param;
                if (cp->val == 0x01)
@@ -844,39 +932,131 @@ static u8 get_adv_discov_flags(struct hci_dev *hdev)
                else if (cp->val == 0x02)
                        return LE_AD_LIMITED;
        } else {
-               if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+               if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        return LE_AD_LIMITED;
-               else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+               else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        return LE_AD_GENERAL;
        }
 
        return 0;
 }
 
-static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
+static u8 get_current_adv_instance(struct hci_dev *hdev)
+{
+       /* The "Set Advertising" setting supersedes the "Add Advertising"
+        * setting. Here we set the advertising data based on which
+        * setting was set. When neither apply, default to the global settings,
+        * represented by instance "0".
+        */
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
+           !hci_dev_test_flag(hdev, HCI_ADVERTISING))
+               return 0x01;
+
+       return 0x00;
+}
+
+static bool get_connectable(struct hci_dev *hdev)
+{
+       struct mgmt_pending_cmd *cmd;
+
+       /* If there's a pending mgmt command the flag will not yet have
+        * it's final value, so check for this first.
+        */
+       cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+       if (cmd) {
+               struct mgmt_mode *cp = cmd->param;
+
+               return cp->val;
+       }
+
+       return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
+}
+
+static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
+{
+       u32 flags;
+
+       if (instance > 0x01)
+               return 0;
+
+       if (instance == 0x01)
+               return hdev->adv_instance.flags;
+
+       /* Instance 0 always manages the "Tx Power" and "Flags" fields */
+       flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
+
+       /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
+        * to the "connectable" instance flag.
+        */
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
+               flags |= MGMT_ADV_FLAG_CONNECTABLE;
+
+       return flags;
+}
+
+static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
+{
+       /* Ignore instance 0 and other unsupported instances */
+       if (instance != 0x01)
+               return 0;
+
+       /* TODO: Take into account the "appearance" and "local-name" flags here.
+        * These are currently being ignored as they are not supported.
+        */
+       return hdev->adv_instance.scan_rsp_len;
+}
+
+static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
 {
        u8 ad_len = 0, flags = 0;
+       u32 instance_flags = get_adv_instance_flags(hdev, instance);
 
-       flags |= get_adv_discov_flags(hdev);
+       /* The Add Advertising command allows userspace to set both the general
+        * and limited discoverable flags.
+        */
+       if (instance_flags & MGMT_ADV_FLAG_DISCOV)
+               flags |= LE_AD_GENERAL;
 
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
-               flags |= LE_AD_NO_BREDR;
+       if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
+               flags |= LE_AD_LIMITED;
 
-       if (flags) {
-               BT_DBG("adv flags 0x%02x", flags);
+       if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
+               /* If a discovery flag wasn't provided, simply use the global
+                * settings.
+                */
+               if (!flags)
+                       flags |= get_adv_discov_flags(hdev);
 
-               ptr[0] = 2;
-               ptr[1] = EIR_FLAGS;
-               ptr[2] = flags;
+               if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+                       flags |= LE_AD_NO_BREDR;
 
-               ad_len += 3;
-               ptr += 3;
+               /* If flags would still be empty, then there is no need to
+                * include the "Flags" AD field".
+                */
+               if (flags) {
+                       ptr[0] = 0x02;
+                       ptr[1] = EIR_FLAGS;
+                       ptr[2] = flags;
+
+                       ad_len += 3;
+                       ptr += 3;
+               }
        }
 
-       if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
-               ptr[0] = 2;
+       if (instance) {
+               memcpy(ptr, hdev->adv_instance.adv_data,
+                      hdev->adv_instance.adv_data_len);
+
+               ad_len += hdev->adv_instance.adv_data_len;
+               ptr += hdev->adv_instance.adv_data_len;
+       }
+
+       /* Provide Tx Power only if we can provide a valid value for it */
+       if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
+           (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
+               ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
-               ptr[2] = (u8) hdev->adv_tx_power;
+               ptr[2] = (u8)hdev->adv_tx_power;
 
                ad_len += 3;
                ptr += 3;
@@ -885,19 +1065,20 @@ static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
        return ad_len;
 }
 
-static void update_adv_data(struct hci_request *req)
+static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
 {
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;
 
-       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;
 
        memset(&cp, 0, sizeof(cp));
 
-       len = create_adv_data(hdev, cp.data);
+       len = create_instance_adv_data(hdev, instance, cp.data);
 
+       /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;
@@ -910,6 +1091,14 @@ static void update_adv_data(struct hci_request *req)
        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
 }
 
+static void update_adv_data(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       u8 instance = get_current_adv_instance(hdev);
+
+       update_adv_data_for_instance(req, instance);
+}
+
 int mgmt_update_adv_data(struct hci_dev *hdev)
 {
        struct hci_request req;
@@ -979,10 +1168,10 @@ static void update_eir(struct hci_request *req)
        if (!lmp_ext_inq_capable(hdev))
                return;
 
-       if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;
 
-       if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;
 
        memset(&cp, 0, sizeof(cp));
@@ -1018,17 +1207,17 @@ static void update_class(struct hci_request *req)
        if (!hdev_is_powered(hdev))
                return;
 
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;
 
-       if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;
 
        cod[0] = hdev->minor_class;
        cod[1] = hdev->major_class;
        cod[2] = get_service_classes(hdev);
 
-       if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                cod[1] |= 0x20;
 
        if (memcmp(cod, hdev->dev_class, 3) == 0)
@@ -1037,22 +1226,6 @@ static void update_class(struct hci_request *req)
        hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
 }
 
-static bool get_connectable(struct hci_dev *hdev)
-{
-       struct pending_cmd *cmd;
-
-       /* If there's a pending mgmt command the flag will not yet have
-        * it's final value, so check for this first.
-        */
-       cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
-       if (cmd) {
-               struct mgmt_mode *cp = cmd->param;
-               return cp->val;
-       }
-
-       return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-}
-
 static void disable_advertising(struct hci_request *req)
 {
        u8 enable = 0x00;
@@ -1066,11 +1239,13 @@ static void enable_advertising(struct hci_request *req)
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
+       u8 instance;
+       u32 flags;
 
        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;
 
-       if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                disable_advertising(req);
 
        /* Clear the HCI_LE_ADV bit temporarily so that the
@@ -1078,9 +1253,16 @@ static void enable_advertising(struct hci_request *req)
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
-       clear_bit(HCI_LE_ADV, &hdev->dev_flags);
+       hci_dev_clear_flag(hdev, HCI_LE_ADV);
+
+       instance = get_current_adv_instance(hdev);
+       flags = get_adv_instance_flags(hdev, instance);
 
-       connectable = get_connectable(hdev);
+       /* If the "connectable" instance flag was not set, then choose between
+        * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+        */
+       connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+                     get_connectable(hdev);
 
        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
@@ -1092,7 +1274,14 @@ static void enable_advertising(struct hci_request *req)
        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
-       cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
+
+       if (connectable)
+               cp.type = LE_ADV_IND;
+       else if (get_adv_instance_scan_rsp_len(hdev, instance))
+               cp.type = LE_ADV_SCAN_IND;
+       else
+               cp.type = LE_ADV_NONCONN_IND;
+
        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;
 
@@ -1107,7 +1296,7 @@ static void service_cache_off(struct work_struct *work)
                                            service_cache.work);
        struct hci_request req;
 
-       if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+       if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
                return;
 
        hci_req_init(&req, hdev);
@@ -1130,9 +1319,9 @@ static void rpa_expired(struct work_struct *work)
 
        BT_DBG("");
 
-       set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+       hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
 
-       if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;
 
        /* The generation of a new RPA and programming it into the
@@ -1145,7 +1334,7 @@ static void rpa_expired(struct work_struct *work)
 
 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
 {
-       if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
+       if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
                return;
 
        INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
@@ -1156,7 +1345,7 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
         * for mgmt we require user-space to explicitly enable
         * it
         */
-       clear_bit(HCI_BONDABLE, &hdev->dev_flags);
+       hci_dev_clear_flag(hdev, HCI_BONDABLE);
 }
 
 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
@@ -1185,73 +1374,16 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
 
        hci_dev_unlock(hdev);
 
-       return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
-                           sizeof(rp));
-}
-
-static void mgmt_pending_free(struct pending_cmd *cmd)
-{
-       sock_put(cmd->sk);
-       kfree(cmd->param);
-       kfree(cmd);
-}
-
-static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
-                                           struct hci_dev *hdev, void *data,
-                                           u16 len)
-{
-       struct pending_cmd *cmd;
-
-       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
-       if (!cmd)
-               return NULL;
-
-       cmd->opcode = opcode;
-       cmd->index = hdev->id;
-
-       cmd->param = kmemdup(data, len, GFP_KERNEL);
-       if (!cmd->param) {
-               kfree(cmd);
-               return NULL;
-       }
-
-       cmd->param_len = len;
-
-       cmd->sk = sk;
-       sock_hold(sk);
-
-       list_add(&cmd->list, &hdev->mgmt_pending);
-
-       return cmd;
-}
-
-static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
-                                void (*cb)(struct pending_cmd *cmd,
-                                           void *data),
-                                void *data)
-{
-       struct pending_cmd *cmd, *tmp;
-
-       list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
-               if (opcode > 0 && cmd->opcode != opcode)
-                       continue;
-
-               cb(cmd, data);
-       }
-}
-
-static void mgmt_pending_remove(struct pending_cmd *cmd)
-{
-       list_del(&cmd->list);
-       mgmt_pending_free(cmd);
+       return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
+                                sizeof(rp));
 }
 
 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
 {
        __le32 settings = cpu_to_le32(get_current_settings(hdev));
 
-       return cmd_complete(sk, hdev->id, opcode, 0, &settings,
-                           sizeof(settings));
+       return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
+                                sizeof(settings));
 }
 
 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
@@ -1272,9 +1404,10 @@ static bool hci_stop_discovery(struct hci_request *req)
 
        switch (hdev->discovery.state) {
        case DISCOVERY_FINDING:
-               if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+               if (test_bit(HCI_INQUIRY, &hdev->flags))
                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-               } else {
+
+               if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_delayed_work(&hdev->le_scan_disable);
                        hci_req_add_le_scan_disable(req);
                }
@@ -1295,7 +1428,7 @@ static bool hci_stop_discovery(struct hci_request *req)
 
        default:
                /* Passive scanning */
-               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
+               if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        hci_req_add_le_scan_disable(req);
                        return true;
                }
@@ -1306,22 +1439,68 @@ static bool hci_stop_discovery(struct hci_request *req)
        return false;
 }
 
-static int clean_up_hci_state(struct hci_dev *hdev)
+static void advertising_added(struct sock *sk, struct hci_dev *hdev,
+                             u8 instance)
 {
-       struct hci_request req;
-       struct hci_conn *conn;
-       bool discov_stopped;
-       int err;
+       struct mgmt_ev_advertising_added ev;
 
-       hci_req_init(&req, hdev);
+       ev.instance = instance;
 
-       if (test_bit(HCI_ISCAN, &hdev->flags) ||
-           test_bit(HCI_PSCAN, &hdev->flags)) {
-               u8 scan = 0x00;
+       mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
+}
+
+static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
+                               u8 instance)
+{
+       struct mgmt_ev_advertising_removed ev;
+
+       ev.instance = instance;
+
+       mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
+}
+
+static void clear_adv_instance(struct hci_dev *hdev)
+{
+       struct hci_request req;
+
+       if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
+               return;
+
+       if (hdev->adv_instance.timeout)
+               cancel_delayed_work(&hdev->adv_instance.timeout_exp);
+
+       memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
+       advertising_removed(NULL, hdev, 1);
+       hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
+
+       if (!hdev_is_powered(hdev) ||
+           hci_dev_test_flag(hdev, HCI_ADVERTISING))
+               return;
+
+       hci_req_init(&req, hdev);
+       disable_advertising(&req);
+       hci_req_run(&req, NULL);
+}
+
+static int clean_up_hci_state(struct hci_dev *hdev)
+{
+       struct hci_request req;
+       struct hci_conn *conn;
+       bool discov_stopped;
+       int err;
+
+       hci_req_init(&req, hdev);
+
+       if (test_bit(HCI_ISCAN, &hdev->flags) ||
+           test_bit(HCI_PSCAN, &hdev->flags)) {
+               u8 scan = 0x00;
                hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        }
 
-       if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
+       if (hdev->adv_instance.timeout)
+               clear_adv_instance(hdev);
+
+       if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                disable_advertising(&req);
 
        discov_stopped = hci_stop_discovery(&req);
@@ -1369,24 +1548,24 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 len)
 {
        struct mgmt_mode *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        int err;
 
        BT_DBG("request for %s", hdev->name);
 
        if (cp->val != 0x00 && cp->val != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
-       if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
-                                MGMT_STATUS_BUSY);
+       if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+                                     MGMT_STATUS_BUSY);
                goto failed;
        }
 
-       if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+       if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                cancel_delayed_work(&hdev->power_off);
 
                if (cp->val) {
@@ -1433,11 +1612,10 @@ failed:
 
 static int new_settings(struct hci_dev *hdev, struct sock *skip)
 {
-       __le32 ev;
+       __le32 ev = cpu_to_le32(get_current_settings(hdev));
 
-       ev = cpu_to_le32(get_current_settings(hdev));
-
-       return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
+       return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
+                                 sizeof(ev), skip);
 }
 
 int mgmt_new_settings(struct hci_dev *hdev)
@@ -1451,7 +1629,7 @@ struct cmd_lookup {
        u8 mgmt_status;
 };
 
-static void settings_rsp(struct pending_cmd *cmd, void *data)
+static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
 {
        struct cmd_lookup *match = data;
 
@@ -1467,15 +1645,15 @@ static void settings_rsp(struct pending_cmd *cmd, void *data)
        mgmt_pending_free(cmd);
 }
 
-static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
+static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
 {
        u8 *status = data;
 
-       cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+       mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
        mgmt_pending_remove(cmd);
 }
 
-static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
+static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
 {
        if (cmd->cmd_complete) {
                u8 *status = data;
@@ -1489,23 +1667,23 @@ static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
        cmd_status_rsp(cmd, data);
 }
 
-static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
 {
-       return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
-                           cmd->param, cmd->param_len);
+       return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
+                                cmd->param, cmd->param_len);
 }
 
-static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
 {
-       return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
-                           sizeof(struct mgmt_addr_info));
+       return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
+                                cmd->param, sizeof(struct mgmt_addr_info));
 }
 
 static u8 mgmt_bredr_support(struct hci_dev *hdev)
 {
        if (!lmp_bredr_capable(hdev))
                return MGMT_STATUS_NOT_SUPPORTED;
-       else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+       else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return MGMT_STATUS_REJECTED;
        else
                return MGMT_STATUS_SUCCESS;
@@ -1515,7 +1693,7 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
 {
        if (!lmp_le_capable(hdev))
                return MGMT_STATUS_NOT_SUPPORTED;
-       else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+       else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return MGMT_STATUS_REJECTED;
        else
                return MGMT_STATUS_SUCCESS;
@@ -1524,7 +1702,7 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
                                      u16 opcode)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct mgmt_mode *cp;
        struct hci_request req;
        bool changed;
@@ -1533,21 +1711,20 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
 
        hci_dev_lock(hdev);
 
-       cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+       cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
        if (!cmd)
                goto unlock;
 
        if (status) {
                u8 mgmt_err = mgmt_status(status);
-               cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
-               clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+               mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+               hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
                goto remove_cmd;
        }
 
        cp = cmd->param;
        if (cp->val) {
-               changed = !test_and_set_bit(HCI_DISCOVERABLE,
-                                           &hdev->dev_flags);
+               changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
 
                if (hdev->discov_timeout > 0) {
                        int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
@@ -1555,8 +1732,7 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
                                           to);
                }
        } else {
-               changed = test_and_clear_bit(HCI_DISCOVERABLE,
-                                            &hdev->dev_flags);
+               changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
        }
 
        send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
@@ -1585,7 +1761,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
                            u16 len)
 {
        struct mgmt_cp_set_discoverable *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        u16 timeout;
        u8 scan;
@@ -1593,14 +1769,14 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
 
        BT_DBG("request for %s", hdev->name);
 
-       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
-           !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
-                                 MGMT_STATUS_REJECTED);
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
+           !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+                                      MGMT_STATUS_REJECTED);
 
        if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        timeout = __le16_to_cpu(cp->timeout);
 
@@ -1609,27 +1785,27 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
         */
        if ((cp->val == 0x00 && timeout > 0) ||
            (cp->val == 0x02 && timeout == 0))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev) && timeout > 0) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
-                                MGMT_STATUS_NOT_POWERED);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+                                     MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
-           mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
-                                MGMT_STATUS_BUSY);
+       if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+           pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+                                     MGMT_STATUS_BUSY);
                goto failed;
        }
 
-       if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
-                                MGMT_STATUS_REJECTED);
+       if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+                                     MGMT_STATUS_REJECTED);
                goto failed;
        }
 
@@ -1640,8 +1816,8 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
                 * not a valid operation since it requires a timeout
                 * and so no need to check HCI_LIMITED_DISCOVERABLE.
                 */
-               if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
-                       change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+               if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
+                       hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
                        changed = true;
                }
 
@@ -1659,9 +1835,9 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
         * value with the new value. And if only the timeout gets updated,
         * then no need for any HCI transactions.
         */
-       if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
-           (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
-                                         &hdev->dev_flags)) {
+       if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
+           (cp->val == 0x02) == hci_dev_test_flag(hdev,
+                                                  HCI_LIMITED_DISCOVERABLE)) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = timeout;
 
@@ -1690,16 +1866,16 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
 
        /* Limited discoverable mode */
        if (cp->val == 0x02)
-               set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        else
-               clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
 
        hci_req_init(&req, hdev);
 
        /* The procedure for LE-only controllers is much simpler - just
         * update the advertising data.
         */
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                goto update_ad;
 
        scan = SCAN_PAGE;
@@ -1729,7 +1905,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
 
                scan |= SCAN_INQUIRY;
        } else {
-               clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        }
 
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
@@ -1752,7 +1928,7 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
        struct hci_cp_write_page_scan_activity acp;
        u8 type;
 
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;
 
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
@@ -1784,7 +1960,7 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
                                     u16 opcode)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct mgmt_mode *cp;
        bool conn_changed, discov_changed;
 
@@ -1792,26 +1968,26 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status,
 
        hci_dev_lock(hdev);
 
-       cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+       cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
        if (!cmd)
                goto unlock;
 
        if (status) {
                u8 mgmt_err = mgmt_status(status);
-               cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+               mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
                goto remove_cmd;
        }
 
        cp = cmd->param;
        if (cp->val) {
-               conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
-                                                &hdev->dev_flags);
+               conn_changed = !hci_dev_test_and_set_flag(hdev,
+                                                         HCI_CONNECTABLE);
                discov_changed = false;
        } else {
-               conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
-                                                 &hdev->dev_flags);
-               discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
-                                                   &hdev->dev_flags);
+               conn_changed = hci_dev_test_and_clear_flag(hdev,
+                                                          HCI_CONNECTABLE);
+               discov_changed = hci_dev_test_and_clear_flag(hdev,
+                                                            HCI_DISCOVERABLE);
        }
 
        send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
@@ -1837,14 +2013,14 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
        bool changed = false;
        int err;
 
-       if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+       if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
                changed = true;
 
        if (val) {
-               set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_CONNECTABLE);
        } else {
-               clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-               clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
+               hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        }
 
        err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
@@ -1864,21 +2040,21 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 len)
 {
        struct mgmt_mode *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        u8 scan;
        int err;
 
        BT_DBG("request for %s", hdev->name);
 
-       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
-           !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
-                                 MGMT_STATUS_REJECTED);
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
+           !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+                                      MGMT_STATUS_REJECTED);
 
        if (cp->val != 0x00 && cp->val != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
@@ -1887,10 +2063,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
-           mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
-                                MGMT_STATUS_BUSY);
+       if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+           pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+                                     MGMT_STATUS_BUSY);
                goto failed;
        }
 
@@ -1906,10 +2082,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
         * by-product of disabling connectable, we need to update the
         * advertising flags.
         */
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                if (!cp->val) {
-                       clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
-                       clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+                       hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+                       hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
                }
                update_adv_data(&req);
        } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
@@ -1938,17 +2114,9 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
        }
 
 no_scan_update:
-       /* If we're going from non-connectable to connectable or
-        * vice-versa when fast connectable is enabled ensure that fast
-        * connectable gets disabled. write_fast_connectable won't do
-        * anything if the page scan parameters are already what they
-        * should be.
-        */
-       if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
-               write_fast_connectable(&req, false);
-
        /* Update the advertising parameters if necessary */
-       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+           hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
                enable_advertising(&req);
 
        err = hci_req_run(&req, set_connectable_complete);
@@ -1975,15 +2143,15 @@ static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
        BT_DBG("request for %s", hdev->name);
 
        if (cp->val != 0x00 && cp->val != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
        if (cp->val)
-               changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
+               changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
        else
-               changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
+               changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
 
        err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
        if (err < 0)
@@ -2001,7 +2169,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
                             u16 len)
 {
        struct mgmt_mode *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        u8 val, status;
        int err;
 
@@ -2009,21 +2177,20 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
 
        status = mgmt_bredr_support(hdev);
        if (status)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
-                                 status);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+                                      status);
 
        if (cp->val != 0x00 && cp->val != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
                bool changed = false;
 
-               if (!!cp->val != test_bit(HCI_LINK_SECURITY,
-                                         &hdev->dev_flags)) {
-                       change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+               if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
+                       hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
                        changed = true;
                }
 
@@ -2037,9 +2204,9 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
-                                MGMT_STATUS_BUSY);
+       if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+                                     MGMT_STATUS_BUSY);
                goto failed;
        }
 
@@ -2070,7 +2237,7 @@ failed:
 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 {
        struct mgmt_mode *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        u8 status;
        int err;
 
@@ -2078,15 +2245,15 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        status = mgmt_bredr_support(hdev);
        if (status)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
 
        if (!lmp_ssp_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
        if (cp->val != 0x00 && cp->val != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
@@ -2094,16 +2261,16 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                bool changed;
 
                if (cp->val) {
-                       changed = !test_and_set_bit(HCI_SSP_ENABLED,
-                                                   &hdev->dev_flags);
+                       changed = !hci_dev_test_and_set_flag(hdev,
+                                                            HCI_SSP_ENABLED);
                } else {
-                       changed = test_and_clear_bit(HCI_SSP_ENABLED,
-                                                    &hdev->dev_flags);
+                       changed = hci_dev_test_and_clear_flag(hdev,
+                                                             HCI_SSP_ENABLED);
                        if (!changed)
-                               changed = test_and_clear_bit(HCI_HS_ENABLED,
-                                                            &hdev->dev_flags);
+                               changed = hci_dev_test_and_clear_flag(hdev,
+                                                                     HCI_HS_ENABLED);
                        else
-                               clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+                               hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
                }
 
                err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
@@ -2116,14 +2283,13 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
-           mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
-                                MGMT_STATUS_BUSY);
+       if (pending_find(MGMT_OP_SET_SSP, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+                                     MGMT_STATUS_BUSY);
                goto failed;
        }
 
-       if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+       if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
                goto failed;
        }
@@ -2134,7 +2300,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                goto failed;
        }
 
-       if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
+       if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
                hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
                             sizeof(cp->val), &cp->val);
 
@@ -2160,32 +2326,38 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        status = mgmt_bredr_support(hdev);
        if (status)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
 
        if (!lmp_ssp_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
-       if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
-                                 MGMT_STATUS_REJECTED);
+       if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+                                      MGMT_STATUS_REJECTED);
 
        if (cp->val != 0x00 && cp->val != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
+       if (pending_find(MGMT_OP_SET_SSP, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+                                     MGMT_STATUS_BUSY);
+               goto unlock;
+       }
+
        if (cp->val) {
-               changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+               changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
        } else {
                if (hdev_is_powered(hdev)) {
-                       err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
-                                        MGMT_STATUS_REJECTED);
+                       err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+                                             MGMT_STATUS_REJECTED);
                        goto unlock;
                }
 
-               changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+               changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
        }
 
        err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
@@ -2226,7 +2398,7 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
         * has actually been enabled. During power on, the
         * update in powered_update_hci will take care of it.
         */
-       if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                struct hci_request req;
 
                hci_req_init(&req, hdev);
@@ -2244,7 +2416,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 {
        struct mgmt_mode *cp = data;
        struct hci_cp_write_le_host_supported hci_cp;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        int err;
        u8 val, enabled;
@@ -2252,17 +2424,29 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
        BT_DBG("request for %s", hdev->name);
 
        if (!lmp_le_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
        if (cp->val != 0x00 && cp->val != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+                                      MGMT_STATUS_INVALID_PARAMS);
+
+       /* Bluetooth single mode LE only controllers or dual-mode
+        * controllers configured as LE only devices, do not allow
+        * switching LE off. These have either LE enabled explicitly
+        * or BR/EDR has been previously switched off.
+        *
+        * When trying to enable an already enabled LE, then gracefully
+        * send a positive response. Trying to disable it however will
+        * result into rejection.
+        */
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+               if (cp->val == 0x01)
+                       return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
 
-       /* LE-only devices do not allow toggling LE on/off */
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
-                                 MGMT_STATUS_REJECTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+                                      MGMT_STATUS_REJECTED);
+       }
 
        hci_dev_lock(hdev);
 
@@ -2272,13 +2456,13 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
        if (!hdev_is_powered(hdev) || val == enabled) {
                bool changed = false;
 
-               if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
-                       change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+               if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+                       hci_dev_change_flag(hdev, HCI_LE_ENABLED);
                        changed = true;
                }
 
-               if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
-                       clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+               if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
+                       hci_dev_clear_flag(hdev, HCI_ADVERTISING);
                        changed = true;
                }
 
@@ -2292,10 +2476,10 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                goto unlock;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
-           mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
-                                MGMT_STATUS_BUSY);
+       if (pending_find(MGMT_OP_SET_LE, hdev) ||
+           pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+                                     MGMT_STATUS_BUSY);
                goto unlock;
        }
 
@@ -2313,7 +2497,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                hci_cp.le = val;
                hci_cp.simul = 0x00;
        } else {
-               if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
+               if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                        disable_advertising(&req);
        }
 
@@ -2337,7 +2521,7 @@ unlock:
  */
 static bool pending_eir_or_class(struct hci_dev *hdev)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
                switch (cmd->opcode) {
@@ -2373,16 +2557,16 @@ static u8 get_uuid_size(const u8 *uuid)
 
 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        hci_dev_lock(hdev);
 
-       cmd = mgmt_pending_find(mgmt_op, hdev);
+       cmd = pending_find(mgmt_op, hdev);
        if (!cmd)
                goto unlock;
 
-       cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
-                    hdev->dev_class, 3);
+       mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
+                         mgmt_status(status), hdev->dev_class, 3);
 
        mgmt_pending_remove(cmd);
 
@@ -2400,7 +2584,7 @@ static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 {
        struct mgmt_cp_add_uuid *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        struct bt_uuid *uuid;
        int err;
@@ -2410,8 +2594,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
        hci_dev_lock(hdev);
 
        if (pending_eir_or_class(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
-                                MGMT_STATUS_BUSY);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
+                                     MGMT_STATUS_BUSY);
                goto failed;
        }
 
@@ -2437,8 +2621,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                if (err != -ENODATA)
                        goto failed;
 
-               err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
-                                  hdev->dev_class, 3);
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
+                                       hdev->dev_class, 3);
                goto failed;
        }
 
@@ -2460,7 +2644,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
        if (!hdev_is_powered(hdev))
                return false;
 
-       if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
+       if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
                queue_delayed_work(hdev->workqueue, &hdev->service_cache,
                                   CACHE_TIMEOUT);
                return true;
@@ -2480,7 +2664,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 len)
 {
        struct mgmt_cp_remove_uuid *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct bt_uuid *match, *tmp;
        u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        struct hci_request req;
@@ -2491,8 +2675,8 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        if (pending_eir_or_class(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
-                                MGMT_STATUS_BUSY);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+                                     MGMT_STATUS_BUSY);
                goto unlock;
        }
 
@@ -2500,8 +2684,9 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
                hci_uuids_clear(hdev);
 
                if (enable_service_cache(hdev)) {
-                       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
-                                          0, hdev->dev_class, 3);
+                       err = mgmt_cmd_complete(sk, hdev->id,
+                                               MGMT_OP_REMOVE_UUID,
+                                               0, hdev->dev_class, 3);
                        goto unlock;
                }
 
@@ -2520,8 +2705,8 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
        }
 
        if (found == 0) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
-                                MGMT_STATUS_INVALID_PARAMS);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+                                     MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }
 
@@ -2536,8 +2721,8 @@ update_class:
                if (err != -ENODATA)
                        goto unlock;
 
-               err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
-                                  hdev->dev_class, 3);
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
+                                       hdev->dev_class, 3);
                goto unlock;
        }
 
@@ -2565,27 +2750,27 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
                         u16 len)
 {
        struct mgmt_cp_set_dev_class *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        int err;
 
        BT_DBG("request for %s", hdev->name);
 
        if (!lmp_bredr_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
        hci_dev_lock(hdev);
 
        if (pending_eir_or_class(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
-                                MGMT_STATUS_BUSY);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+                                     MGMT_STATUS_BUSY);
                goto unlock;
        }
 
        if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
-                                MGMT_STATUS_INVALID_PARAMS);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+                                     MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }
 
@@ -2593,14 +2778,14 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
        hdev->minor_class = cp->minor;
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
-                                  hdev->dev_class, 3);
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
+                                       hdev->dev_class, 3);
                goto unlock;
        }
 
        hci_req_init(&req, hdev);
 
-       if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
+       if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
                hci_dev_unlock(hdev);
                cancel_delayed_work_sync(&hdev->service_cache);
                hci_dev_lock(hdev);
@@ -2614,8 +2799,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
                if (err != -ENODATA)
                        goto unlock;
 
-               err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
-                                  hdev->dev_class, 3);
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
+                                       hdev->dev_class, 3);
                goto unlock;
        }
 
@@ -2645,15 +2830,15 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
        BT_DBG("request for %s", hdev->name);
 
        if (!lmp_bredr_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
        key_count = __le16_to_cpu(cp->key_count);
        if (key_count > max_key_count) {
                BT_ERR("load_link_keys: too big key_count value %u",
                       key_count);
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+                                      MGMT_STATUS_INVALID_PARAMS);
        }
 
        expected_len = sizeof(*cp) + key_count *
@@ -2661,13 +2846,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
        if (expected_len != len) {
                BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
                       expected_len, len);
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+                                      MGMT_STATUS_INVALID_PARAMS);
        }
 
        if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
               key_count);
@@ -2676,8 +2861,9 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
                struct mgmt_link_key_info *key = &cp->keys[i];
 
                if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
-                       return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
-                                         MGMT_STATUS_INVALID_PARAMS);
+                       return mgmt_cmd_status(sk, hdev->id,
+                                              MGMT_OP_LOAD_LINK_KEYS,
+                                              MGMT_STATUS_INVALID_PARAMS);
        }
 
        hci_dev_lock(hdev);
@@ -2685,11 +2871,10 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_link_keys_clear(hdev);
 
        if (cp->debug_keys)
-               changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
-                                           &hdev->dev_flags);
+               changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
        else
-               changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
-                                            &hdev->dev_flags);
+               changed = hci_dev_test_and_clear_flag(hdev,
+                                                     HCI_KEEP_DEBUG_KEYS);
 
        if (changed)
                new_settings(hdev, NULL);
@@ -2707,7 +2892,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
                                 key->type, key->pin_len, NULL);
        }
 
-       cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
+       mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
 
        hci_dev_unlock(hdev);
 
@@ -2732,7 +2917,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        struct mgmt_cp_unpair_device *cp = data;
        struct mgmt_rp_unpair_device rp;
        struct hci_cp_disconnect dc;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_conn *conn;
        int err;
 
@@ -2741,20 +2926,21 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        rp.addr.type = cp->addr.type;
 
        if (!bdaddr_type_is_valid(cp->addr.type))
-               return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &rp, sizeof(rp));
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &rp, sizeof(rp));
 
        if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
-               return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &rp, sizeof(rp));
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &rp, sizeof(rp));
 
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
-                                  MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+                                       MGMT_STATUS_NOT_POWERED, &rp,
+                                       sizeof(rp));
                goto unlock;
        }
 
@@ -2804,8 +2990,9 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        }
 
        if (err < 0) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
-                                  MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+                                       MGMT_STATUS_NOT_PAIRED, &rp,
+                                       sizeof(rp));
                goto unlock;
        }
 
@@ -2813,8 +3000,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
         * link is requested.
         */
        if (!conn) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
-                                  &rp, sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
+                                       &rp, sizeof(rp));
                device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
                goto unlock;
        }
@@ -2844,7 +3031,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
 {
        struct mgmt_cp_disconnect *cp = data;
        struct mgmt_rp_disconnect rp;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_conn *conn;
        int err;
 
@@ -2855,21 +3042,22 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
        rp.addr.type = cp->addr.type;
 
        if (!bdaddr_type_is_valid(cp->addr.type))
-               return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &rp, sizeof(rp));
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &rp, sizeof(rp));
 
        hci_dev_lock(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
-                                  MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+                                       MGMT_STATUS_NOT_POWERED, &rp,
+                                       sizeof(rp));
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
-                                  MGMT_STATUS_BUSY, &rp, sizeof(rp));
+       if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+                                       MGMT_STATUS_BUSY, &rp, sizeof(rp));
                goto failed;
        }
 
@@ -2880,8 +3068,9 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
 
        if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
-                                  MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+                                       MGMT_STATUS_NOT_CONNECTED, &rp,
+                                       sizeof(rp));
                goto failed;
        }
 
@@ -2935,8 +3124,8 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
-                                MGMT_STATUS_NOT_POWERED);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
+                                     MGMT_STATUS_NOT_POWERED);
                goto unlock;
        }
 
@@ -2969,8 +3158,8 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
        /* Recalculate length in case of filtered SCO connections, etc */
        rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
 
-       err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
-                          rp_len);
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
+                               rp_len);
 
        kfree(rp);
 
@@ -2982,7 +3171,7 @@ unlock:
 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
                                   struct mgmt_cp_pin_code_neg_reply *cp)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        int err;
 
        cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
@@ -3004,7 +3193,7 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
        struct hci_conn *conn;
        struct mgmt_cp_pin_code_reply *cp = data;
        struct hci_cp_pin_code_reply reply;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        int err;
 
        BT_DBG("");
@@ -3012,15 +3201,15 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
-                                MGMT_STATUS_NOT_POWERED);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+                                     MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
        if (!conn) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
-                                MGMT_STATUS_NOT_CONNECTED);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+                                     MGMT_STATUS_NOT_CONNECTED);
                goto failed;
        }
 
@@ -3033,8 +3222,8 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
 
                err = send_pin_code_neg_reply(sk, hdev, &ncp);
                if (err >= 0)
-                       err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
-                                        MGMT_STATUS_INVALID_PARAMS);
+                       err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+                                             MGMT_STATUS_INVALID_PARAMS);
 
                goto failed;
        }
@@ -3068,8 +3257,8 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
        BT_DBG("");
 
        if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
-               return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
-                                   MGMT_STATUS_INVALID_PARAMS, NULL, 0);
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
+                                        MGMT_STATUS_INVALID_PARAMS, NULL, 0);
 
        hci_dev_lock(hdev);
 
@@ -3080,14 +3269,14 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
 
        hci_dev_unlock(hdev);
 
-       return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
-                           0);
+       return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
+                                NULL, 0);
 }
 
-static struct pending_cmd *find_pairing(struct hci_conn *conn)
+static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
                if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
@@ -3102,7 +3291,7 @@ static struct pending_cmd *find_pairing(struct hci_conn *conn)
        return NULL;
 }
 
-static int pairing_complete(struct pending_cmd *cmd, u8 status)
+static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
 {
        struct mgmt_rp_pair_device rp;
        struct hci_conn *conn = cmd->user_data;
@@ -3111,8 +3300,8 @@ static int pairing_complete(struct pending_cmd *cmd, u8 status)
        bacpy(&rp.addr.bdaddr, &conn->dst);
        rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
 
-       err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
-                          &rp, sizeof(rp));
+       err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
+                               status, &rp, sizeof(rp));
 
        /* So we don't get further callbacks for this connection */
        conn->connect_cfm_cb = NULL;
@@ -3134,7 +3323,7 @@ static int pairing_complete(struct pending_cmd *cmd, u8 status)
 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
 {
        u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        cmd = find_pairing(conn);
        if (cmd) {
@@ -3145,7 +3334,7 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete)
 
 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        BT_DBG("status %u", status);
 
@@ -3161,7 +3350,7 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
 
 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        BT_DBG("status %u", status);
 
@@ -3183,7 +3372,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 {
        struct mgmt_cp_pair_device *cp = data;
        struct mgmt_rp_pair_device rp;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        u8 sec_level, auth_type;
        struct hci_conn *conn;
        int err;
@@ -3195,20 +3384,28 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        rp.addr.type = cp->addr.type;
 
        if (!bdaddr_type_is_valid(cp->addr.type))
-               return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &rp, sizeof(rp));
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &rp, sizeof(rp));
 
        if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
-               return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &rp, sizeof(rp));
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &rp, sizeof(rp));
 
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
-                                  MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+                                       MGMT_STATUS_NOT_POWERED, &rp,
+                                       sizeof(rp));
+               goto unlock;
+       }
+
+       if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+                                       MGMT_STATUS_ALREADY_PAIRED, &rp,
+                                       sizeof(rp));
                goto unlock;
        }
 
@@ -3249,19 +3446,22 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 
                if (PTR_ERR(conn) == -EBUSY)
                        status = MGMT_STATUS_BUSY;
+               else if (PTR_ERR(conn) == -EOPNOTSUPP)
+                       status = MGMT_STATUS_NOT_SUPPORTED;
+               else if (PTR_ERR(conn) == -ECONNREFUSED)
+                       status = MGMT_STATUS_REJECTED;
                else
                        status = MGMT_STATUS_CONNECT_FAILED;
 
-               err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
-                                  status, &rp,
-                                  sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+                                       status, &rp, sizeof(rp));
                goto unlock;
        }
 
        if (conn->connect_cfm_cb) {
                hci_conn_drop(conn);
-               err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
-                                  MGMT_STATUS_BUSY, &rp, sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+                                       MGMT_STATUS_BUSY, &rp, sizeof(rp));
                goto unlock;
        }
 
@@ -3305,7 +3505,7 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                              u16 len)
 {
        struct mgmt_addr_info *addr = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_conn *conn;
        int err;
 
@@ -3314,31 +3514,31 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
-                                MGMT_STATUS_NOT_POWERED);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+                                     MGMT_STATUS_NOT_POWERED);
                goto unlock;
        }
 
-       cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
+       cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
        if (!cmd) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
-                                MGMT_STATUS_INVALID_PARAMS);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+                                     MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }
 
        conn = cmd->user_data;
 
        if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
-                                MGMT_STATUS_INVALID_PARAMS);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+                                     MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }
 
        cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
        mgmt_pending_remove(cmd);
 
-       err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
-                          addr, sizeof(*addr));
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
+                               addr, sizeof(*addr));
 unlock:
        hci_dev_unlock(hdev);
        return err;
@@ -3348,16 +3548,16 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
                             struct mgmt_addr_info *addr, u16 mgmt_op,
                             u16 hci_op, __le32 passkey)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_conn *conn;
        int err;
 
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_complete(sk, hdev->id, mgmt_op,
-                                  MGMT_STATUS_NOT_POWERED, addr,
-                                  sizeof(*addr));
+               err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
+                                       MGMT_STATUS_NOT_POWERED, addr,
+                                       sizeof(*addr));
                goto done;
        }
 
@@ -3367,22 +3567,22 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
 
        if (!conn) {
-               err = cmd_complete(sk, hdev->id, mgmt_op,
-                                  MGMT_STATUS_NOT_CONNECTED, addr,
-                                  sizeof(*addr));
+               err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
+                                       MGMT_STATUS_NOT_CONNECTED, addr,
+                                       sizeof(*addr));
                goto done;
        }
 
        if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
                err = smp_user_confirm_reply(conn, mgmt_op, passkey);
                if (!err)
-                       err = cmd_complete(sk, hdev->id, mgmt_op,
-                                          MGMT_STATUS_SUCCESS, addr,
-                                          sizeof(*addr));
+                       err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
+                                               MGMT_STATUS_SUCCESS, addr,
+                                               sizeof(*addr));
                else
-                       err = cmd_complete(sk, hdev->id, mgmt_op,
-                                          MGMT_STATUS_FAILED, addr,
-                                          sizeof(*addr));
+                       err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
+                                               MGMT_STATUS_FAILED, addr,
+                                               sizeof(*addr));
 
                goto done;
        }
@@ -3434,8 +3634,8 @@ static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
        BT_DBG("");
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        return user_pairing_resp(sk, hdev, &cp->addr,
                                 MGMT_OP_USER_CONFIRM_REPLY,
@@ -3491,24 +3691,24 @@ static void update_name(struct hci_request *req)
 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct mgmt_cp_set_local_name *cp;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        BT_DBG("status 0x%02x", status);
 
        hci_dev_lock(hdev);
 
-       cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
+       cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
        if (!cmd)
                goto unlock;
 
        cp = cmd->param;
 
        if (status)
-               cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
-                          mgmt_status(status));
+               mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+                               mgmt_status(status));
        else
-               cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
-                            cp, sizeof(*cp));
+               mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+                                 cp, sizeof(*cp));
 
        mgmt_pending_remove(cmd);
 
@@ -3520,7 +3720,7 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
 {
        struct mgmt_cp_set_local_name *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        int err;
 
@@ -3534,8 +3734,8 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
        if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
            !memcmp(hdev->short_name, cp->short_name,
                    sizeof(hdev->short_name))) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
-                                  data, len);
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+                                       data, len);
                goto failed;
        }
 
@@ -3544,13 +3744,13 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
        if (!hdev_is_powered(hdev)) {
                memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
 
-               err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
-                                  data, len);
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+                                       data, len);
                if (err < 0)
                        goto failed;
 
-               err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
-                                sk);
+               err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
+                                        data, len, sk);
 
                goto failed;
        }
@@ -3585,10 +3785,70 @@ failed:
        return err;
 }
 
+static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
+                                        u16 opcode, struct sk_buff *skb)
+{
+       struct mgmt_rp_read_local_oob_data mgmt_rp;
+       size_t rp_size = sizeof(mgmt_rp);
+       struct mgmt_pending_cmd *cmd;
+
+       BT_DBG("%s status %u", hdev->name, status);
+
+       cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
+       if (!cmd)
+               return;
+
+       if (status || !skb) {
+               mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+                               status ? mgmt_status(status) : MGMT_STATUS_FAILED);
+               goto remove;
+       }
+
+       memset(&mgmt_rp, 0, sizeof(mgmt_rp));
+
+       if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
+               struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
+
+               if (skb->len < sizeof(*rp)) {
+                       mgmt_cmd_status(cmd->sk, hdev->id,
+                                       MGMT_OP_READ_LOCAL_OOB_DATA,
+                                       MGMT_STATUS_FAILED);
+                       goto remove;
+               }
+
+               memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
+               memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
+
+               rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
+       } else {
+               struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
+
+               if (skb->len < sizeof(*rp)) {
+                       mgmt_cmd_status(cmd->sk, hdev->id,
+                                       MGMT_OP_READ_LOCAL_OOB_DATA,
+                                       MGMT_STATUS_FAILED);
+                       goto remove;
+               }
+
+               memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
+               memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
+
+               memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
+               memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
+       }
+
+       mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+                         MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
+
+remove:
+       mgmt_pending_remove(cmd);
+}
+
 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
                               void *data, u16 data_len)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
+       struct hci_request req;
        int err;
 
        BT_DBG("%s", hdev->name);
@@ -3596,20 +3856,20 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                MGMT_STATUS_NOT_POWERED);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+                                     MGMT_STATUS_NOT_POWERED);
                goto unlock;
        }
 
        if (!lmp_ssp_capable(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                MGMT_STATUS_NOT_SUPPORTED);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+                                     MGMT_STATUS_NOT_SUPPORTED);
                goto unlock;
        }
 
-       if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                MGMT_STATUS_BUSY);
+       if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+                                     MGMT_STATUS_BUSY);
                goto unlock;
        }
 
@@ -3619,12 +3879,14 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
                goto unlock;
        }
 
+       hci_req_init(&req, hdev);
+
        if (bredr_sc_enabled(hdev))
-               err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
-                                  0, NULL);
+               hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
        else
-               err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+               hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
 
+       err = hci_req_run_skb(&req, read_local_oob_data_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);
 
@@ -3642,9 +3904,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
        BT_DBG("%s ", hdev->name);
 
        if (!bdaddr_type_is_valid(addr->type))
-               return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                   MGMT_STATUS_INVALID_PARAMS, addr,
-                                   sizeof(*addr));
+               return mgmt_cmd_complete(sk, hdev->id,
+                                        MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        addr, sizeof(*addr));
 
        hci_dev_lock(hdev);
 
@@ -3653,10 +3916,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
                u8 status;
 
                if (cp->addr.type != BDADDR_BREDR) {
-                       err = cmd_complete(sk, hdev->id,
-                                          MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                          MGMT_STATUS_INVALID_PARAMS,
-                                          &cp->addr, sizeof(cp->addr));
+                       err = mgmt_cmd_complete(sk, hdev->id,
+                                               MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                               MGMT_STATUS_INVALID_PARAMS,
+                                               &cp->addr, sizeof(cp->addr));
                        goto unlock;
                }
 
@@ -3668,8 +3931,9 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
                else
                        status = MGMT_STATUS_SUCCESS;
 
-               err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                  status, &cp->addr, sizeof(cp->addr));
+               err = mgmt_cmd_complete(sk, hdev->id,
+                                       MGMT_OP_ADD_REMOTE_OOB_DATA, status,
+                                       &cp->addr, sizeof(cp->addr));
        } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
                struct mgmt_cp_add_remote_oob_ext_data *cp = data;
                u8 *rand192, *hash192, *rand256, *hash256;
@@ -3681,10 +3945,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
                         */
                        if (memcmp(cp->rand192, ZERO_KEY, 16) ||
                            memcmp(cp->hash192, ZERO_KEY, 16)) {
-                               err = cmd_complete(sk, hdev->id,
-                                                  MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                                  MGMT_STATUS_INVALID_PARAMS,
-                                                  addr, sizeof(*addr));
+                               err = mgmt_cmd_complete(sk, hdev->id,
+                                                       MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                                       MGMT_STATUS_INVALID_PARAMS,
+                                                       addr, sizeof(*addr));
                                goto unlock;
                        }
 
@@ -3724,12 +3988,13 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
                else
                        status = MGMT_STATUS_SUCCESS;
 
-               err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                  status, &cp->addr, sizeof(cp->addr));
+               err = mgmt_cmd_complete(sk, hdev->id,
+                                       MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                       status, &cp->addr, sizeof(cp->addr));
        } else {
                BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
-               err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                MGMT_STATUS_INVALID_PARAMS);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                     MGMT_STATUS_INVALID_PARAMS);
        }
 
 unlock:
@@ -3747,9 +4012,10 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
        BT_DBG("%s", hdev->name);
 
        if (cp->addr.type != BDADDR_BREDR)
-               return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &cp->addr, sizeof(cp->addr));
+               return mgmt_cmd_complete(sk, hdev->id,
+                                        MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &cp->addr, sizeof(cp->addr));
 
        hci_dev_lock(hdev);
 
@@ -3766,100 +4032,136 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
                status = MGMT_STATUS_SUCCESS;
 
 done:
-       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-                          status, &cp->addr, sizeof(cp->addr));
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+                               status, &cp->addr, sizeof(cp->addr));
 
        hci_dev_unlock(hdev);
        return err;
 }
 
-static bool trigger_discovery(struct hci_request *req, u8 *status)
+static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
 {
        struct hci_dev *hdev = req->hdev;
-       struct hci_cp_le_set_scan_param param_cp;
-       struct hci_cp_le_set_scan_enable enable_cp;
-       struct hci_cp_inquiry inq_cp;
+       struct hci_cp_inquiry cp;
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
+
+       *status = mgmt_bredr_support(hdev);
+       if (*status)
+               return false;
+
+       if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
+               *status = MGMT_STATUS_BUSY;
+               return false;
+       }
+
+       hci_inquiry_cache_flush(hdev);
+
+       memset(&cp, 0, sizeof(cp));
+       memcpy(&cp.lap, lap, sizeof(cp.lap));
+       cp.length = DISCOV_BREDR_INQUIRY_LEN;
+
+       hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+
+       return true;
+}
+
+static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_le_set_scan_param param_cp;
+       struct hci_cp_le_set_scan_enable enable_cp;
        u8 own_addr_type;
        int err;
 
-       switch (hdev->discovery.type) {
-       case DISCOV_TYPE_BREDR:
-               *status = mgmt_bredr_support(hdev);
-               if (*status)
-                       return false;
+       *status = mgmt_le_support(hdev);
+       if (*status)
+               return false;
 
-               if (test_bit(HCI_INQUIRY, &hdev->flags)) {
-                       *status = MGMT_STATUS_BUSY;
+       if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
+               /* Don't let discovery abort an outgoing connection attempt
+                * that's using directed advertising.
+                */
+               if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+                       *status = MGMT_STATUS_REJECTED;
                        return false;
                }
 
-               hci_inquiry_cache_flush(hdev);
+               disable_advertising(req);
+       }
 
-               memset(&inq_cp, 0, sizeof(inq_cp));
-               memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
-               inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
-               hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
-               break;
+       /* If controller is scanning, it means the background scanning is
+        * running. Thus, we should temporarily stop it in order to set the
+        * discovery scanning parameters.
+        */
+       if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
+               hci_req_add_le_scan_disable(req);
 
-       case DISCOV_TYPE_LE:
-       case DISCOV_TYPE_INTERLEAVED:
-               *status = mgmt_le_support(hdev);
-               if (*status)
-                       return false;
+       /* All active scans will be done with either a resolvable private
+        * address (when privacy feature has been enabled) or non-resolvable
+        * private address.
+        */
+       err = hci_update_random_address(req, true, &own_addr_type);
+       if (err < 0) {
+               *status = MGMT_STATUS_FAILED;
+               return false;
+       }
 
-               if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
-                   !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
-                       *status = MGMT_STATUS_NOT_SUPPORTED;
+       memset(&param_cp, 0, sizeof(param_cp));
+       param_cp.type = LE_SCAN_ACTIVE;
+       param_cp.interval = cpu_to_le16(interval);
+       param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
+       param_cp.own_address_type = own_addr_type;
+
+       hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+                   &param_cp);
+
+       memset(&enable_cp, 0, sizeof(enable_cp));
+       enable_cp.enable = LE_SCAN_ENABLE;
+       enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+
+       hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+                   &enable_cp);
+
+       return true;
+}
+
+static bool trigger_discovery(struct hci_request *req, u8 *status)
+{
+       struct hci_dev *hdev = req->hdev;
+
+       switch (hdev->discovery.type) {
+       case DISCOV_TYPE_BREDR:
+               if (!trigger_bredr_inquiry(req, status))
                        return false;
-               }
+               break;
 
-               if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
-                       /* Don't let discovery abort an outgoing
-                        * connection attempt that's using directed
-                        * advertising.
+       case DISCOV_TYPE_INTERLEAVED:
+               if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+                            &hdev->quirks)) {
+                       /* During simultaneous discovery, we double LE scan
+                        * interval. We must leave some time for the controller
+                        * to do BR/EDR inquiry.
                         */
-                       if (hci_conn_hash_lookup_state(hdev, LE_LINK,
-                                                      BT_CONNECT)) {
-                               *status = MGMT_STATUS_REJECTED;
+                       if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
+                                            status))
                                return false;
-                       }
 
-                       disable_advertising(req);
-               }
-
-               /* If controller is scanning, it means the background scanning
-                * is running. Thus, we should temporarily stop it in order to
-                * set the discovery scanning parameters.
-                */
-               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
-                       hci_req_add_le_scan_disable(req);
+                       if (!trigger_bredr_inquiry(req, status))
+                               return false;
 
-               memset(&param_cp, 0, sizeof(param_cp));
+                       return true;
+               }
 
-               /* All active scans will be done with either a resolvable
-                * private address (when privacy feature has been enabled)
-                * or non-resolvable private address.
-                */
-               err = hci_update_random_address(req, true, &own_addr_type);
-               if (err < 0) {
-                       *status = MGMT_STATUS_FAILED;
+               if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+                       *status = MGMT_STATUS_NOT_SUPPORTED;
                        return false;
                }
+               /* fall through */
 
-               param_cp.type = LE_SCAN_ACTIVE;
-               param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
-               param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
-               param_cp.own_address_type = own_addr_type;
-               hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
-                           &param_cp);
-
-               memset(&enable_cp, 0, sizeof(enable_cp));
-               enable_cp.enable = LE_SCAN_ENABLE;
-               enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
-               hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
-                           &enable_cp);
+       case DISCOV_TYPE_LE:
+               if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
+                       return false;
                break;
 
        default:
@@ -3873,16 +4175,16 @@ static bool trigger_discovery(struct hci_request *req, u8 *status)
 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
                                     u16 opcode)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        unsigned long timeout;
 
        BT_DBG("status %d", status);
 
        hci_dev_lock(hdev);
 
-       cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+       cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
        if (!cmd)
-               cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
+               cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
 
        if (cmd) {
                cmd->cmd_complete(cmd, mgmt_status(status));
@@ -3904,7 +4206,18 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status,
                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                break;
        case DISCOV_TYPE_INTERLEAVED:
-               timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
+                /* When running simultaneous discovery, the LE scanning time
+                * should occupy the whole discovery time sine BR/EDR inquiry
+                * and LE scanning are scheduled by the controller.
+                *
+                * For interleaving discovery in comparison, BR/EDR inquiry
+                * and LE scanning are done sequentially with separate
+                * timeouts.
+                */
+               if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
+                       timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+               else
+                       timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
                break;
        case DISCOV_TYPE_BREDR:
                timeout = 0;
@@ -3923,8 +4236,7 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status,
                 */
                if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
                             &hdev->quirks) &&
-                   (hdev->discovery.uuid_count > 0 ||
-                    hdev->discovery.rssi != HCI_RSSI_INVALID)) {
+                   hdev->discovery.result_filtering) {
                        hdev->discovery.scan_start = jiffies;
                        hdev->discovery.scan_duration = timeout;
                }
@@ -3941,7 +4253,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
 {
        struct mgmt_cp_start_discovery *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        u8 status;
        int err;
@@ -3951,17 +4263,17 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
-                                  MGMT_STATUS_NOT_POWERED,
-                                  &cp->type, sizeof(cp->type));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+                                       MGMT_STATUS_NOT_POWERED,
+                                       &cp->type, sizeof(cp->type));
                goto failed;
        }
 
        if (hdev->discovery.state != DISCOVERY_STOPPED ||
-           test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
-                                  MGMT_STATUS_BUSY, &cp->type,
-                                  sizeof(cp->type));
+           hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+                                       MGMT_STATUS_BUSY, &cp->type,
+                                       sizeof(cp->type));
                goto failed;
        }
 
@@ -3984,8 +4296,8 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
        hci_req_init(&req, hdev);
 
        if (!trigger_discovery(&req, &status)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
-                                  status, &cp->type, sizeof(cp->type));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+                                       status, &cp->type, sizeof(cp->type));
                mgmt_pending_remove(cmd);
                goto failed;
        }
@@ -4003,17 +4315,18 @@ failed:
        return err;
 }
 
-static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
+                                         u8 status)
 {
-       return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
-                           cmd->param, 1);
+       return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
+                                cmd->param, 1);
 }
 
 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
                                   void *data, u16 len)
 {
        struct mgmt_cp_start_service_discovery *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
        u16 uuid_count, expected_len;
@@ -4025,19 +4338,19 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_complete(sk, hdev->id,
-                                  MGMT_OP_START_SERVICE_DISCOVERY,
-                                  MGMT_STATUS_NOT_POWERED,
-                                  &cp->type, sizeof(cp->type));
+               err = mgmt_cmd_complete(sk, hdev->id,
+                                       MGMT_OP_START_SERVICE_DISCOVERY,
+                                       MGMT_STATUS_NOT_POWERED,
+                                       &cp->type, sizeof(cp->type));
                goto failed;
        }
 
        if (hdev->discovery.state != DISCOVERY_STOPPED ||
-           test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
-               err = cmd_complete(sk, hdev->id,
-                                  MGMT_OP_START_SERVICE_DISCOVERY,
-                                  MGMT_STATUS_BUSY, &cp->type,
-                                  sizeof(cp->type));
+           hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
+               err = mgmt_cmd_complete(sk, hdev->id,
+                                       MGMT_OP_START_SERVICE_DISCOVERY,
+                                       MGMT_STATUS_BUSY, &cp->type,
+                                       sizeof(cp->type));
                goto failed;
        }
 
@@ -4045,10 +4358,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
        if (uuid_count > max_uuid_count) {
                BT_ERR("service_discovery: too big uuid_count value %u",
                       uuid_count);
-               err = cmd_complete(sk, hdev->id,
-                                  MGMT_OP_START_SERVICE_DISCOVERY,
-                                  MGMT_STATUS_INVALID_PARAMS, &cp->type,
-                                  sizeof(cp->type));
+               err = mgmt_cmd_complete(sk, hdev->id,
+                                       MGMT_OP_START_SERVICE_DISCOVERY,
+                                       MGMT_STATUS_INVALID_PARAMS, &cp->type,
+                                       sizeof(cp->type));
                goto failed;
        }
 
@@ -4056,10 +4369,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
        if (expected_len != len) {
                BT_ERR("service_discovery: expected %u bytes, got %u bytes",
                       expected_len, len);
-               err = cmd_complete(sk, hdev->id,
-                                  MGMT_OP_START_SERVICE_DISCOVERY,
-                                  MGMT_STATUS_INVALID_PARAMS, &cp->type,
-                                  sizeof(cp->type));
+               err = mgmt_cmd_complete(sk, hdev->id,
+                                       MGMT_OP_START_SERVICE_DISCOVERY,
+                                       MGMT_STATUS_INVALID_PARAMS, &cp->type,
+                                       sizeof(cp->type));
                goto failed;
        }
 
@@ -4077,6 +4390,7 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
         */
        hci_discovery_filter_clear(hdev);
 
+       hdev->discovery.result_filtering = true;
        hdev->discovery.type = cp->type;
        hdev->discovery.rssi = cp->rssi;
        hdev->discovery.uuid_count = uuid_count;
@@ -4085,10 +4399,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
                hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
                                                GFP_KERNEL);
                if (!hdev->discovery.uuids) {
-                       err = cmd_complete(sk, hdev->id,
-                                          MGMT_OP_START_SERVICE_DISCOVERY,
-                                          MGMT_STATUS_FAILED,
-                                          &cp->type, sizeof(cp->type));
+                       err = mgmt_cmd_complete(sk, hdev->id,
+                                               MGMT_OP_START_SERVICE_DISCOVERY,
+                                               MGMT_STATUS_FAILED,
+                                               &cp->type, sizeof(cp->type));
                        mgmt_pending_remove(cmd);
                        goto failed;
                }
@@ -4097,9 +4411,9 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
        hci_req_init(&req, hdev);
 
        if (!trigger_discovery(&req, &status)) {
-               err = cmd_complete(sk, hdev->id,
-                                  MGMT_OP_START_SERVICE_DISCOVERY,
-                                  status, &cp->type, sizeof(cp->type));
+               err = mgmt_cmd_complete(sk, hdev->id,
+                                       MGMT_OP_START_SERVICE_DISCOVERY,
+                                       status, &cp->type, sizeof(cp->type));
                mgmt_pending_remove(cmd);
                goto failed;
        }
@@ -4119,13 +4433,13 @@ failed:
 
 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        BT_DBG("status %d", status);
 
        hci_dev_lock(hdev);
 
-       cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+       cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
        if (cmd) {
                cmd->cmd_complete(cmd, mgmt_status(status));
                mgmt_pending_remove(cmd);
@@ -4141,7 +4455,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
 {
        struct mgmt_cp_stop_discovery *mgmt_cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        int err;
 
@@ -4150,16 +4464,16 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        if (!hci_discovery_active(hdev)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
-                                  MGMT_STATUS_REJECTED, &mgmt_cp->type,
-                                  sizeof(mgmt_cp->type));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
+                                       MGMT_STATUS_REJECTED, &mgmt_cp->type,
+                                       sizeof(mgmt_cp->type));
                goto unlock;
        }
 
        if (hdev->discovery.type != mgmt_cp->type) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
-                                  MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
-                                  sizeof(mgmt_cp->type));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
+                                       MGMT_STATUS_INVALID_PARAMS,
+                                       &mgmt_cp->type, sizeof(mgmt_cp->type));
                goto unlock;
        }
 
@@ -4185,8 +4499,8 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 
        /* If no HCI commands were sent we're done */
        if (err == -ENODATA) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
-                                  &mgmt_cp->type, sizeof(mgmt_cp->type));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
+                                       &mgmt_cp->type, sizeof(mgmt_cp->type));
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        }
 
@@ -4207,17 +4521,17 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        if (!hci_discovery_active(hdev)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
-                                  MGMT_STATUS_FAILED, &cp->addr,
-                                  sizeof(cp->addr));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+                                       MGMT_STATUS_FAILED, &cp->addr,
+                                       sizeof(cp->addr));
                goto failed;
        }
 
        e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
        if (!e) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
-                                  MGMT_STATUS_INVALID_PARAMS, &cp->addr,
-                                  sizeof(cp->addr));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+                                       MGMT_STATUS_INVALID_PARAMS, &cp->addr,
+                                       sizeof(cp->addr));
                goto failed;
        }
 
@@ -4229,8 +4543,8 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
                hci_inquiry_cache_update_resolve(hdev, e);
        }
 
-       err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
-                          sizeof(cp->addr));
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
+                               &cp->addr, sizeof(cp->addr));
 
 failed:
        hci_dev_unlock(hdev);
@@ -4247,9 +4561,9 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
        BT_DBG("%s", hdev->name);
 
        if (!bdaddr_type_is_valid(cp->addr.type))
-               return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &cp->addr, sizeof(cp->addr));
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &cp->addr, sizeof(cp->addr));
 
        hci_dev_lock(hdev);
 
@@ -4265,8 +4579,8 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
        status = MGMT_STATUS_SUCCESS;
 
 done:
-       err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
-                          &cp->addr, sizeof(cp->addr));
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
+                               &cp->addr, sizeof(cp->addr));
 
        hci_dev_unlock(hdev);
 
@@ -4283,9 +4597,9 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
        BT_DBG("%s", hdev->name);
 
        if (!bdaddr_type_is_valid(cp->addr.type))
-               return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &cp->addr, sizeof(cp->addr));
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &cp->addr, sizeof(cp->addr));
 
        hci_dev_lock(hdev);
 
@@ -4301,8 +4615,8 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
        status = MGMT_STATUS_SUCCESS;
 
 done:
-       err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
-                          &cp->addr, sizeof(cp->addr));
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
+                               &cp->addr, sizeof(cp->addr));
 
        hci_dev_unlock(hdev);
 
@@ -4322,8 +4636,8 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
        source = __le16_to_cpu(cp->source);
 
        if (source > 0x0002)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
@@ -4332,7 +4646,8 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
        hdev->devid_product = __le16_to_cpu(cp->product);
        hdev->devid_version = __le16_to_cpu(cp->version);
 
-       err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
+                               NULL, 0);
 
        hci_req_init(&req, hdev);
        update_eir(&req);
@@ -4343,10 +4658,17 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
        return err;
 }
 
+static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
+                                       u16 opcode)
+{
+       BT_DBG("status %d", status);
+}
+
 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
                                     u16 opcode)
 {
        struct cmd_lookup match = { NULL, hdev };
+       struct hci_request req;
 
        hci_dev_lock(hdev);
 
@@ -4358,10 +4680,10 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status,
                goto unlock;
        }
 
-       if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
-               set_bit(HCI_ADVERTISING, &hdev->dev_flags);
+       if (hci_dev_test_flag(hdev, HCI_LE_ADV))
+               hci_dev_set_flag(hdev, HCI_ADVERTISING);
        else
-               clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_ADVERTISING);
 
        mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
                             &match);
@@ -4371,6 +4693,21 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status,
        if (match.sk)
                sock_put(match.sk);
 
+       /* If "Set Advertising" was just disabled and instance advertising was
+        * set up earlier, then enable the advertising instance.
+        */
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+           !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
+               goto unlock;
+
+       hci_req_init(&req, hdev);
+
+       update_adv_data(&req);
+       enable_advertising(&req);
+
+       if (hci_req_run(&req, enable_advertising_instance) < 0)
+               BT_ERR("Failed to re-configure advertising");
+
 unlock:
        hci_dev_unlock(hdev);
 }
@@ -4379,41 +4716,48 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 len)
 {
        struct mgmt_mode *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
-       u8 val, enabled, status;
+       u8 val, status;
        int err;
 
        BT_DBG("request for %s", hdev->name);
 
        status = mgmt_le_support(hdev);
        if (status)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
-                                 status);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+                                      status);
 
-       if (cp->val != 0x00 && cp->val != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
-                                 MGMT_STATUS_INVALID_PARAMS);
+       if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
        val = !!cp->val;
-       enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
 
        /* The following conditions are ones which mean that we should
         * not do any HCI communication but directly send a mgmt
         * response to user space (after toggling the flag if
         * necessary).
         */
-       if (!hdev_is_powered(hdev) || val == enabled ||
+       if (!hdev_is_powered(hdev) ||
+           (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+            (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
            hci_conn_num(hdev, LE_LINK) > 0 ||
-           (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+           (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
             hdev->le_scan_type == LE_SCAN_ACTIVE)) {
-               bool changed = false;
+               bool changed;
 
-               if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
-                       change_bit(HCI_ADVERTISING, &hdev->dev_flags);
-                       changed = true;
+               if (cp->val) {
+                       changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
+                       if (cp->val == 0x02)
+                               hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
+                       else
+                               hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
+               } else {
+                       changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
+                       hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
                }
 
                err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
@@ -4426,10 +4770,10 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
                goto unlock;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
-           mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
-                                MGMT_STATUS_BUSY);
+       if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
+           pending_find(MGMT_OP_SET_LE, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+                                     MGMT_STATUS_BUSY);
                goto unlock;
        }
 
@@ -4441,10 +4785,19 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
 
        hci_req_init(&req, hdev);
 
-       if (val)
-               enable_advertising(&req);
+       if (cp->val == 0x02)
+               hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
        else
+               hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
+
+       if (val) {
+               /* Switch to instance "0" for the Set Advertising setting. */
+               update_adv_data_for_instance(&req, 0);
+               update_scan_rsp_data_for_instance(&req, 0);
+               enable_advertising(&req);
+       } else {
                disable_advertising(&req);
+       }
 
        err = hci_req_run(&req, set_advertising_complete);
        if (err < 0)
@@ -4464,34 +4817,38 @@ static int set_static_address(struct sock *sk, struct hci_dev *hdev,
        BT_DBG("%s", hdev->name);
 
        if (!lmp_le_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
        if (hdev_is_powered(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
-                                 MGMT_STATUS_REJECTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+                                      MGMT_STATUS_REJECTED);
 
        if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
                if (!bacmp(&cp->bdaddr, BDADDR_NONE))
-                       return cmd_status(sk, hdev->id,
-                                         MGMT_OP_SET_STATIC_ADDRESS,
-                                         MGMT_STATUS_INVALID_PARAMS);
+                       return mgmt_cmd_status(sk, hdev->id,
+                                              MGMT_OP_SET_STATIC_ADDRESS,
+                                              MGMT_STATUS_INVALID_PARAMS);
 
                /* Two most significant bits shall be set */
                if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
-                       return cmd_status(sk, hdev->id,
-                                         MGMT_OP_SET_STATIC_ADDRESS,
-                                         MGMT_STATUS_INVALID_PARAMS);
+                       return mgmt_cmd_status(sk, hdev->id,
+                                              MGMT_OP_SET_STATIC_ADDRESS,
+                                              MGMT_STATUS_INVALID_PARAMS);
        }
 
        hci_dev_lock(hdev);
 
        bacpy(&hdev->static_addr, &cp->bdaddr);
 
-       err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
+       err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
+       if (err < 0)
+               goto unlock;
 
-       hci_dev_unlock(hdev);
+       err = new_settings(hdev, sk);
 
+unlock:
+       hci_dev_unlock(hdev);
        return err;
 }
 
@@ -4505,36 +4862,37 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
        BT_DBG("%s", hdev->name);
 
        if (!lmp_le_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
        interval = __le16_to_cpu(cp->interval);
 
        if (interval < 0x0004 || interval > 0x4000)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        window = __le16_to_cpu(cp->window);
 
        if (window < 0x0004 || window > 0x4000)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        if (window > interval)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
        hdev->le_scan_interval = interval;
        hdev->le_scan_window = window;
 
-       err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
+                               NULL, 0);
 
        /* If background scan is running, restart it so new parameters are
         * loaded.
         */
-       if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+       if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
            hdev->discovery.state == DISCOVERY_STOPPED) {
                struct hci_request req;
 
@@ -4554,26 +4912,26 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
                                      u16 opcode)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        BT_DBG("status 0x%02x", status);
 
        hci_dev_lock(hdev);
 
-       cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
+       cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
        if (!cmd)
                goto unlock;
 
        if (status) {
-               cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
-                          mgmt_status(status));
+               mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+                               mgmt_status(status));
        } else {
                struct mgmt_mode *cp = cmd->param;
 
                if (cp->val)
-                       set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+                       hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
                else
-                       clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+                       hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
 
                send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
                new_settings(hdev, cmd->sk);
@@ -4589,40 +4947,40 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
                                void *data, u16 len)
 {
        struct mgmt_mode *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        int err;
 
        BT_DBG("%s", hdev->name);
 
-       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
            hdev->hci_ver < BLUETOOTH_VER_1_2)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
        if (cp->val != 0x00 && cp->val != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
-                                 MGMT_STATUS_INVALID_PARAMS);
-
-       if (!hdev_is_powered(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
-                                 MGMT_STATUS_NOT_POWERED);
-
-       if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
-                                 MGMT_STATUS_REJECTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
-       if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
-                                MGMT_STATUS_BUSY);
+       if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+                                     MGMT_STATUS_BUSY);
+               goto unlock;
+       }
+
+       if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
+               err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
+                                       hdev);
                goto unlock;
        }
 
-       if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
+       if (!hdev_is_powered(hdev)) {
+               hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
                err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
                                        hdev);
+               new_settings(hdev, sk);
                goto unlock;
        }
 
@@ -4639,8 +4997,8 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
 
        err = hci_req_run(&req, fast_connectable_complete);
        if (err < 0) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
-                                MGMT_STATUS_FAILED);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+                                     MGMT_STATUS_FAILED);
                mgmt_pending_remove(cmd);
        }
 
@@ -4652,13 +5010,13 @@ unlock:
 
 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        BT_DBG("status 0x%02x", status);
 
        hci_dev_lock(hdev);
 
-       cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
+       cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
        if (!cmd)
                goto unlock;
 
@@ -4668,9 +5026,9 @@ static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
                /* We need to restore the flag if related HCI commands
                 * failed.
                 */
-               clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
 
-               cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+               mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
        } else {
                send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
                new_settings(hdev, cmd->sk);
@@ -4685,41 +5043,41 @@ unlock:
 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 {
        struct mgmt_mode *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        int err;
 
        BT_DBG("request for %s", hdev->name);
 
        if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
-       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
-                                 MGMT_STATUS_REJECTED);
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                      MGMT_STATUS_REJECTED);
 
        if (cp->val != 0x00 && cp->val != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
-       if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+       if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
                goto unlock;
        }
 
        if (!hdev_is_powered(hdev)) {
                if (!cp->val) {
-                       clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
-                       clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
-                       clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
-                       clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
-                       clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+                       hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+                       hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
+                       hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
+                       hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
+                       hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
                }
 
-               change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+               hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
 
                err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
                if (err < 0)
@@ -4731,8 +5089,8 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        /* Reject disabling when powered on */
        if (!cp->val) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
-                                MGMT_STATUS_REJECTED);
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                     MGMT_STATUS_REJECTED);
                goto unlock;
        } else {
                /* When configuring a dual-mode controller to operate
@@ -4749,18 +5107,18 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                 * switching BR/EDR back on when secure connections has been
                 * enabled is not a supported transaction.
                 */
-               if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+               if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
                    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
-                    test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
-                       err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
-                                        MGMT_STATUS_REJECTED);
+                    hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
+                       err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                             MGMT_STATUS_REJECTED);
                        goto unlock;
                }
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
-                                MGMT_STATUS_BUSY);
+       if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                     MGMT_STATUS_BUSY);
                goto unlock;
        }
 
@@ -4773,7 +5131,7 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
        /* We need to flip the bit already here so that update_adv_data
         * generates the correct flags.
         */
-       set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+       hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
 
        hci_req_init(&req, hdev);
 
@@ -4796,20 +5154,20 @@ unlock:
 
 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct mgmt_mode *cp;
 
        BT_DBG("%s status %u", hdev->name, status);
 
        hci_dev_lock(hdev);
 
-       cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
+       cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
        if (!cmd)
                goto unlock;
 
        if (status) {
-               cmd_status(cmd->sk, cmd->index, cmd->opcode,
-                          mgmt_status(status));
+               mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
+                               mgmt_status(status));
                goto remove;
        }
 
@@ -4817,16 +5175,16 @@ static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 
        switch (cp->val) {
        case 0x00:
-               clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
-               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
+               hci_dev_clear_flag(hdev, HCI_SC_ONLY);
                break;
        case 0x01:
-               set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
-               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_SC_ENABLED);
+               hci_dev_clear_flag(hdev, HCI_SC_ONLY);
                break;
        case 0x02:
-               set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
-               set_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_SC_ENABLED);
+               hci_dev_set_flag(hdev, HCI_SC_ONLY);
                break;
        }
 
@@ -4843,7 +5201,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
 {
        struct mgmt_mode *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        u8 val;
        int err;
@@ -4851,37 +5209,37 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
        BT_DBG("request for %s", hdev->name);
 
        if (!lmp_sc_capable(hdev) &&
-           !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+           !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
-       if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+       if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
            lmp_sc_capable(hdev) &&
-           !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
-                                 MGMT_STATUS_REJECTED);
+           !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+                                      MGMT_STATUS_REJECTED);
 
        if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                                  MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
-           !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+           !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                bool changed;
 
                if (cp->val) {
-                       changed = !test_and_set_bit(HCI_SC_ENABLED,
-                                                   &hdev->dev_flags);
+                       changed = !hci_dev_test_and_set_flag(hdev,
+                                                            HCI_SC_ENABLED);
                        if (cp->val == 0x02)
-                               set_bit(HCI_SC_ONLY, &hdev->dev_flags);
+                               hci_dev_set_flag(hdev, HCI_SC_ONLY);
                        else
-                               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+                               hci_dev_clear_flag(hdev, HCI_SC_ONLY);
                } else {
-                       changed = test_and_clear_bit(HCI_SC_ENABLED,
-                                                    &hdev->dev_flags);
-                       clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+                       changed = hci_dev_test_and_clear_flag(hdev,
+                                                             HCI_SC_ENABLED);
+                       hci_dev_clear_flag(hdev, HCI_SC_ONLY);
                }
 
                err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
@@ -4894,16 +5252,16 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
-                                MGMT_STATUS_BUSY);
+       if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+                                     MGMT_STATUS_BUSY);
                goto failed;
        }
 
        val = !!cp->val;
 
-       if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
-           (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
+       if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
+           (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
                err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
                goto failed;
        }
@@ -4937,27 +5295,26 @@ static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
        BT_DBG("request for %s", hdev->name);
 
        if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
        if (cp->val)
-               changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
-                                           &hdev->dev_flags);
+               changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
        else
-               changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
-                                            &hdev->dev_flags);
+               changed = hci_dev_test_and_clear_flag(hdev,
+                                                     HCI_KEEP_DEBUG_KEYS);
 
        if (cp->val == 0x02)
-               use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
-                                               &hdev->dev_flags);
+               use_changed = !hci_dev_test_and_set_flag(hdev,
+                                                        HCI_USE_DEBUG_KEYS);
        else
-               use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
-                                                &hdev->dev_flags);
+               use_changed = hci_dev_test_and_clear_flag(hdev,
+                                                         HCI_USE_DEBUG_KEYS);
 
        if (hdev_is_powered(hdev) && use_changed &&
-           test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+           hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
                hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
                             sizeof(mode), &mode);
@@ -4985,32 +5342,32 @@ static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
        BT_DBG("request for %s", hdev->name);
 
        if (!lmp_le_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
        if (cp->privacy != 0x00 && cp->privacy != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        if (hdev_is_powered(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
-                                 MGMT_STATUS_REJECTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+                                      MGMT_STATUS_REJECTED);
 
        hci_dev_lock(hdev);
 
        /* If user space supports this command it is also expected to
         * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
         */
-       set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
+       hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
 
        if (cp->privacy) {
-               changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
+               changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
                memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
-               set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+               hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
        } else {
-               changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
+               changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
                memset(hdev->irk, 0, sizeof(hdev->irk));
-               clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+               hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
        }
 
        err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
@@ -5053,22 +5410,22 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
        BT_DBG("request for %s", hdev->name);
 
        if (!lmp_le_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
        irk_count = __le16_to_cpu(cp->irk_count);
        if (irk_count > max_irk_count) {
                BT_ERR("load_irks: too big irk_count value %u", irk_count);
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+                                      MGMT_STATUS_INVALID_PARAMS);
        }
 
        expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
        if (expected_len != len) {
                BT_ERR("load_irks: expected %u bytes, got %u bytes",
                       expected_len, len);
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+                                      MGMT_STATUS_INVALID_PARAMS);
        }
 
        BT_DBG("%s irk_count %u", hdev->name, irk_count);
@@ -5077,9 +5434,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
                struct mgmt_irk_info *key = &cp->irks[i];
 
                if (!irk_is_valid(key))
-                       return cmd_status(sk, hdev->id,
-                                         MGMT_OP_LOAD_IRKS,
-                                         MGMT_STATUS_INVALID_PARAMS);
+                       return mgmt_cmd_status(sk, hdev->id,
+                                              MGMT_OP_LOAD_IRKS,
+                                              MGMT_STATUS_INVALID_PARAMS);
        }
 
        hci_dev_lock(hdev);
@@ -5099,9 +5456,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
                            BDADDR_ANY);
        }
 
-       set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
+       hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
 
-       err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
 
        hci_dev_unlock(hdev);
 
@@ -5139,14 +5496,14 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
        BT_DBG("request for %s", hdev->name);
 
        if (!lmp_le_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
        key_count = __le16_to_cpu(cp->key_count);
        if (key_count > max_key_count) {
                BT_ERR("load_ltks: too big key_count value %u", key_count);
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+                                      MGMT_STATUS_INVALID_PARAMS);
        }
 
        expected_len = sizeof(*cp) + key_count *
@@ -5154,8 +5511,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
        if (expected_len != len) {
                BT_ERR("load_keys: expected %u bytes, got %u bytes",
                       expected_len, len);
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+                                      MGMT_STATUS_INVALID_PARAMS);
        }
 
        BT_DBG("%s key_count %u", hdev->name, key_count);
@@ -5164,9 +5521,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
                struct mgmt_ltk_info *key = &cp->keys[i];
 
                if (!ltk_is_valid(key))
-                       return cmd_status(sk, hdev->id,
-                                         MGMT_OP_LOAD_LONG_TERM_KEYS,
-                                         MGMT_STATUS_INVALID_PARAMS);
+                       return mgmt_cmd_status(sk, hdev->id,
+                                              MGMT_OP_LOAD_LONG_TERM_KEYS,
+                                              MGMT_STATUS_INVALID_PARAMS);
        }
 
        hci_dev_lock(hdev);
@@ -5211,7 +5568,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
                            key->rand);
        }
 
-       err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
                           NULL, 0);
 
        hci_dev_unlock(hdev);
@@ -5219,7 +5576,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
        return err;
 }
 
-static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
 {
        struct hci_conn *conn = cmd->user_data;
        struct mgmt_rp_get_conn_info rp;
@@ -5237,8 +5594,8 @@ static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
                rp.max_tx_power = HCI_TX_POWER_INVALID;
        }
 
-       err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
-                          &rp, sizeof(rp));
+       err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
+                               status, &rp, sizeof(rp));
 
        hci_conn_drop(conn);
        hci_conn_put(conn);
@@ -5250,7 +5607,7 @@ static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
                                       u16 opcode)
 {
        struct hci_cp_read_rssi *cp;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_conn *conn;
        u16 handle;
        u8 status;
@@ -5288,7 +5645,7 @@ static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
                goto unlock;
        }
 
-       cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
+       cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
        if (!cmd)
                goto unlock;
 
@@ -5315,15 +5672,16 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
        rp.addr.type = cp->addr.type;
 
        if (!bdaddr_type_is_valid(cp->addr.type))
-               return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &rp, sizeof(rp));
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &rp, sizeof(rp));
 
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
-                                  MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+                                       MGMT_STATUS_NOT_POWERED, &rp,
+                                       sizeof(rp));
                goto unlock;
        }
 
@@ -5334,14 +5692,15 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
 
        if (!conn || conn->state != BT_CONNECTED) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
-                                  MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+                                       MGMT_STATUS_NOT_CONNECTED, &rp,
+                                       sizeof(rp));
                goto unlock;
        }
 
-       if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
-                                  MGMT_STATUS_BUSY, &rp, sizeof(rp));
+       if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+                                       MGMT_STATUS_BUSY, &rp, sizeof(rp));
                goto unlock;
        }
 
@@ -5361,7 +5720,7 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
                struct hci_request req;
                struct hci_cp_read_tx_power req_txp_cp;
                struct hci_cp_read_rssi req_rssi_cp;
-               struct pending_cmd *cmd;
+               struct mgmt_pending_cmd *cmd;
 
                hci_req_init(&req, hdev);
                req_rssi_cp.handle = cpu_to_le16(conn->handle);
@@ -5409,8 +5768,8 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
                rp.tx_power = conn->tx_power;
                rp.max_tx_power = conn->max_tx_power;
 
-               err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
-                                  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+                                       MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
        }
 
 unlock:
@@ -5418,7 +5777,7 @@ unlock:
        return err;
 }
 
-static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
 {
        struct hci_conn *conn = cmd->user_data;
        struct mgmt_rp_get_clock_info rp;
@@ -5443,8 +5802,8 @@ static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
        }
 
 complete:
-       err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
-                          sizeof(rp));
+       err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
+                               sizeof(rp));
 
        if (conn) {
                hci_conn_drop(conn);
@@ -5457,7 +5816,7 @@ complete:
 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct hci_cp_read_clock *hci_cp;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_conn *conn;
 
        BT_DBG("%s status %u", hdev->name, status);
@@ -5475,7 +5834,7 @@ static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
                conn = NULL;
        }
 
-       cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
+       cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
        if (!cmd)
                goto unlock;
 
@@ -5492,7 +5851,7 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
        struct mgmt_cp_get_clock_info *cp = data;
        struct mgmt_rp_get_clock_info rp;
        struct hci_cp_read_clock hci_cp;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        struct hci_conn *conn;
        int err;
@@ -5504,15 +5863,16 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
        rp.addr.type = cp->addr.type;
 
        if (cp->addr.type != BDADDR_BREDR)
-               return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &rp, sizeof(rp));
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &rp, sizeof(rp));
 
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
-                                  MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
+                                       MGMT_STATUS_NOT_POWERED, &rp,
+                                       sizeof(rp));
                goto unlock;
        }
 
@@ -5520,10 +5880,10 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
                                               &cp->addr.bdaddr);
                if (!conn || conn->state != BT_CONNECTED) {
-                       err = cmd_complete(sk, hdev->id,
-                                          MGMT_OP_GET_CLOCK_INFO,
-                                          MGMT_STATUS_NOT_CONNECTED,
-                                          &rp, sizeof(rp));
+                       err = mgmt_cmd_complete(sk, hdev->id,
+                                               MGMT_OP_GET_CLOCK_INFO,
+                                               MGMT_STATUS_NOT_CONNECTED,
+                                               &rp, sizeof(rp));
                        goto unlock;
                }
        } else {
@@ -5634,13 +5994,13 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
 
 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        BT_DBG("status 0x%02x", status);
 
        hci_dev_lock(hdev);
 
-       cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
+       cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
        if (!cmd)
                goto unlock;
 
@@ -5655,7 +6015,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
                      void *data, u16 len)
 {
        struct mgmt_cp_add_device *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        u8 auto_conn, addr_type;
        int err;
@@ -5664,14 +6024,14 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
 
        if (!bdaddr_type_is_valid(cp->addr.type) ||
            !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
-               return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &cp->addr, sizeof(cp->addr));
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &cp->addr, sizeof(cp->addr));
 
        if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
-               return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
-                                   MGMT_STATUS_INVALID_PARAMS,
-                                   &cp->addr, sizeof(cp->addr));
+               return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+                                        MGMT_STATUS_INVALID_PARAMS,
+                                        &cp->addr, sizeof(cp->addr));
 
        hci_req_init(&req, hdev);
 
@@ -5757,13 +6117,13 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
 
 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        BT_DBG("status 0x%02x", status);
 
        hci_dev_lock(hdev);
 
-       cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
+       cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
        if (!cmd)
                goto unlock;
 
@@ -5778,7 +6138,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                         void *data, u16 len)
 {
        struct mgmt_cp_remove_device *cp = data;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        int err;
 
@@ -5911,15 +6271,15 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
        int i;
 
        if (!lmp_le_capable(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+                                      MGMT_STATUS_NOT_SUPPORTED);
 
        param_count = __le16_to_cpu(cp->param_count);
        if (param_count > max_param_count) {
                BT_ERR("load_conn_param: too big param_count value %u",
                       param_count);
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+                                      MGMT_STATUS_INVALID_PARAMS);
        }
 
        expected_len = sizeof(*cp) + param_count *
@@ -5927,8 +6287,8 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
        if (expected_len != len) {
                BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
                       expected_len, len);
-               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
-                                 MGMT_STATUS_INVALID_PARAMS);
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+                                      MGMT_STATUS_INVALID_PARAMS);
        }
 
        BT_DBG("%s param_count %u", hdev->name, param_count);
@@ -5981,328 +6341,816 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
                hci_param->supervision_timeout = timeout;
        }
 
-       hci_dev_unlock(hdev);
+       hci_dev_unlock(hdev);
+
+       return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
+                                NULL, 0);
+}
+
+static int set_external_config(struct sock *sk, struct hci_dev *hdev,
+                              void *data, u16 len)
+{
+       struct mgmt_cp_set_external_config *cp = data;
+       bool changed;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       if (hdev_is_powered(hdev))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+                                      MGMT_STATUS_REJECTED);
+
+       if (cp->config != 0x00 && cp->config != 0x01)
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+                                        MGMT_STATUS_INVALID_PARAMS);
+
+       if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+                                      MGMT_STATUS_NOT_SUPPORTED);
+
+       hci_dev_lock(hdev);
+
+       if (cp->config)
+               changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
+       else
+               changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
+
+       err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
+       if (err < 0)
+               goto unlock;
+
+       if (!changed)
+               goto unlock;
+
+       err = new_options(hdev, sk);
+
+       if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
+               mgmt_index_removed(hdev);
+
+               if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
+                       hci_dev_set_flag(hdev, HCI_CONFIG);
+                       hci_dev_set_flag(hdev, HCI_AUTO_OFF);
+
+                       queue_work(hdev->req_workqueue, &hdev->power_on);
+               } else {
+                       set_bit(HCI_RAW, &hdev->flags);
+                       mgmt_index_added(hdev);
+               }
+       }
+
+unlock:
+       hci_dev_unlock(hdev);
+       return err;
+}
+
+static int set_public_address(struct sock *sk, struct hci_dev *hdev,
+                             void *data, u16 len)
+{
+       struct mgmt_cp_set_public_address *cp = data;
+       bool changed;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       if (hdev_is_powered(hdev))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+                                      MGMT_STATUS_REJECTED);
+
+       if (!bacmp(&cp->bdaddr, BDADDR_ANY))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+                                      MGMT_STATUS_INVALID_PARAMS);
+
+       if (!hdev->set_bdaddr)
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+                                      MGMT_STATUS_NOT_SUPPORTED);
+
+       hci_dev_lock(hdev);
+
+       changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
+       bacpy(&hdev->public_addr, &cp->bdaddr);
+
+       err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
+       if (err < 0)
+               goto unlock;
+
+       if (!changed)
+               goto unlock;
+
+       if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+               err = new_options(hdev, sk);
+
+       if (is_configured(hdev)) {
+               mgmt_index_removed(hdev);
+
+               hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
+
+               hci_dev_set_flag(hdev, HCI_CONFIG);
+               hci_dev_set_flag(hdev, HCI_AUTO_OFF);
+
+               queue_work(hdev->req_workqueue, &hdev->power_on);
+       }
+
+unlock:
+       hci_dev_unlock(hdev);
+       return err;
+}
+
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
+                                 u8 data_len)
+{
+       eir[eir_len++] = sizeof(type) + data_len;
+       eir[eir_len++] = type;
+       memcpy(&eir[eir_len], data, data_len);
+       eir_len += data_len;
+
+       return eir_len;
+}
+
+static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
+                                  void *data, u16 data_len)
+{
+       struct mgmt_cp_read_local_oob_ext_data *cp = data;
+       struct mgmt_rp_read_local_oob_ext_data *rp;
+       size_t rp_len;
+       u16 eir_len;
+       u8 status, flags, role, addr[7], hash[16], rand[16];
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       if (hdev_is_powered(hdev)) {
+               switch (cp->type) {
+               case BIT(BDADDR_BREDR):
+                       status = mgmt_bredr_support(hdev);
+                       if (status)
+                               eir_len = 0;
+                       else
+                               eir_len = 5;
+                       break;
+               case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
+                       status = mgmt_le_support(hdev);
+                       if (status)
+                               eir_len = 0;
+                       else
+                               eir_len = 9 + 3 + 18 + 18 + 3;
+                       break;
+               default:
+                       status = MGMT_STATUS_INVALID_PARAMS;
+                       eir_len = 0;
+                       break;
+               }
+       } else {
+               status = MGMT_STATUS_NOT_POWERED;
+               eir_len = 0;
+       }
+
+       rp_len = sizeof(*rp) + eir_len;
+       rp = kmalloc(rp_len, GFP_ATOMIC);
+       if (!rp)
+               return -ENOMEM;
+
+       if (status)
+               goto complete;
+
+       hci_dev_lock(hdev);
+
+       eir_len = 0;
+       switch (cp->type) {
+       case BIT(BDADDR_BREDR):
+               eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
+                                         hdev->dev_class, 3);
+               break;
+       case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
+               if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
+                   smp_generate_oob(hdev, hash, rand) < 0) {
+                       hci_dev_unlock(hdev);
+                       status = MGMT_STATUS_FAILED;
+                       goto complete;
+               }
+
+               /* This should return the active RPA, but since the RPA
+                * is only programmed on demand, it is really hard to fill
+                * this in at the moment. For now disallow retrieving
+                * local out-of-band data when privacy is in use.
+                *
+                * Returning the identity address will not help here since
+                * pairing happens before the identity resolving key is
+                * known and thus the connection establishment happens
+                * based on the RPA and not the identity address.
+                */
+               if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
+                       hci_dev_unlock(hdev);
+                       status = MGMT_STATUS_REJECTED;
+                       goto complete;
+               }
+
+               if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
+                  !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+                  (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
+                   bacmp(&hdev->static_addr, BDADDR_ANY))) {
+                       memcpy(addr, &hdev->static_addr, 6);
+                       addr[6] = 0x01;
+               } else {
+                       memcpy(addr, &hdev->bdaddr, 6);
+                       addr[6] = 0x00;
+               }
+
+               eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
+                                         addr, sizeof(addr));
+
+               if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
+                       role = 0x02;
+               else
+                       role = 0x01;
+
+               eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
+                                         &role, sizeof(role));
+
+               if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
+                       eir_len = eir_append_data(rp->eir, eir_len,
+                                                 EIR_LE_SC_CONFIRM,
+                                                 hash, sizeof(hash));
+
+                       eir_len = eir_append_data(rp->eir, eir_len,
+                                                 EIR_LE_SC_RANDOM,
+                                                 rand, sizeof(rand));
+               }
+
+               flags = get_adv_discov_flags(hdev);
+
+               if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+                       flags |= LE_AD_NO_BREDR;
+
+               eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
+                                         &flags, sizeof(flags));
+               break;
+       }
+
+       hci_dev_unlock(hdev);
+
+       hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
+
+       status = MGMT_STATUS_SUCCESS;
+
+complete:
+       rp->type = cp->type;
+       rp->eir_len = cpu_to_le16(eir_len);
+
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
+                               status, rp, sizeof(*rp) + eir_len);
+       if (err < 0 || status)
+               goto done;
+
+       err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
+                                rp, sizeof(*rp) + eir_len,
+                                HCI_MGMT_OOB_DATA_EVENTS, sk);
+
+done:
+       kfree(rp);
+
+       return err;
+}
+
+static u32 get_supported_adv_flags(struct hci_dev *hdev)
+{
+       u32 flags = 0;
+
+       flags |= MGMT_ADV_FLAG_CONNECTABLE;
+       flags |= MGMT_ADV_FLAG_DISCOV;
+       flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
+       flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
+
+       if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
+               flags |= MGMT_ADV_FLAG_TX_POWER;
+
+       return flags;
+}
+
+static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
+                            void *data, u16 data_len)
+{
+       struct mgmt_rp_read_adv_features *rp;
+       size_t rp_len;
+       int err;
+       bool instance;
+       u32 supported_flags;
+
+       BT_DBG("%s", hdev->name);
+
+       if (!lmp_le_capable(hdev))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
+                                      MGMT_STATUS_REJECTED);
+
+       hci_dev_lock(hdev);
+
+       rp_len = sizeof(*rp);
+
+       /* Currently only one instance is supported, so just add 1 to the
+        * response length.
+        */
+       instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
+       if (instance)
+               rp_len++;
+
+       rp = kmalloc(rp_len, GFP_ATOMIC);
+       if (!rp) {
+               hci_dev_unlock(hdev);
+               return -ENOMEM;
+       }
+
+       supported_flags = get_supported_adv_flags(hdev);
+
+       rp->supported_flags = cpu_to_le32(supported_flags);
+       rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
+       rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
+       rp->max_instances = 1;
+
+       /* Currently only one instance is supported, so simply return the
+        * current instance number.
+        */
+       if (instance) {
+               rp->num_instances = 1;
+               rp->instance[0] = 1;
+       } else {
+               rp->num_instances = 0;
+       }
+
+       hci_dev_unlock(hdev);
+
+       err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
+                               MGMT_STATUS_SUCCESS, rp, rp_len);
+
+       kfree(rp);
+
+       return err;
+}
+
+static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
+                             u8 len, bool is_adv_data)
+{
+       u8 max_len = HCI_MAX_AD_LENGTH;
+       int i, cur_len;
+       bool flags_managed = false;
+       bool tx_power_managed = false;
+       u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
+                          MGMT_ADV_FLAG_MANAGED_FLAGS;
+
+       if (is_adv_data && (adv_flags & flags_params)) {
+               flags_managed = true;
+               max_len -= 3;
+       }
+
+       if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
+               tx_power_managed = true;
+               max_len -= 3;
+       }
+
+       if (len > max_len)
+               return false;
+
+       /* Make sure that the data is correctly formatted. */
+       for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
+               cur_len = data[i];
+
+               if (flags_managed && data[i + 1] == EIR_FLAGS)
+                       return false;
+
+               if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
+                       return false;
+
+               /* If the current field length would exceed the total data
+                * length, then it's invalid.
+                */
+               if (i + cur_len >= len)
+                       return false;
+       }
+
+       return true;
+}
+
+static void add_advertising_complete(struct hci_dev *hdev, u8 status,
+                                    u16 opcode)
+{
+       struct mgmt_pending_cmd *cmd;
+       struct mgmt_rp_add_advertising rp;
+
+       BT_DBG("status %d", status);
+
+       hci_dev_lock(hdev);
+
+       cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
+
+       if (status) {
+               hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
+               memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
+               advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
+       }
+
+       if (!cmd)
+               goto unlock;
+
+       rp.instance = 0x01;
+
+       if (status)
+               mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
+                               mgmt_status(status));
+       else
+               mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
+                                 mgmt_status(status), &rp, sizeof(rp));
+
+       mgmt_pending_remove(cmd);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
+static void adv_timeout_expired(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                           adv_instance.timeout_exp.work);
+
+       hdev->adv_instance.timeout = 0;
 
-       return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
+       hci_dev_lock(hdev);
+       clear_adv_instance(hdev);
+       hci_dev_unlock(hdev);
 }
 
-static int set_external_config(struct sock *sk, struct hci_dev *hdev,
-                              void *data, u16 len)
+static int add_advertising(struct sock *sk, struct hci_dev *hdev,
+                          void *data, u16 data_len)
 {
-       struct mgmt_cp_set_external_config *cp = data;
-       bool changed;
+       struct mgmt_cp_add_advertising *cp = data;
+       struct mgmt_rp_add_advertising rp;
+       u32 flags;
+       u32 supported_flags;
+       u8 status;
+       u16 timeout;
        int err;
+       struct mgmt_pending_cmd *cmd;
+       struct hci_request req;
 
        BT_DBG("%s", hdev->name);
 
-       if (hdev_is_powered(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
-                                 MGMT_STATUS_REJECTED);
+       status = mgmt_le_support(hdev);
+       if (status)
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+                                      status);
 
-       if (cp->config != 0x00 && cp->config != 0x01)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
-                                   MGMT_STATUS_INVALID_PARAMS);
+       flags = __le32_to_cpu(cp->flags);
+       timeout = __le16_to_cpu(cp->timeout);
 
-       if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+       /* The current implementation only supports adding one instance and only
+        * a subset of the specified flags.
+        */
+       supported_flags = get_supported_adv_flags(hdev);
+       if (cp->instance != 0x01 || (flags & ~supported_flags))
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
-       if (cp->config)
-               changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
-                                           &hdev->dev_flags);
-       else
-               changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
-                                            &hdev->dev_flags);
+       if (timeout && !hdev_is_powered(hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+                                     MGMT_STATUS_REJECTED);
+               goto unlock;
+       }
 
-       err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
-       if (err < 0)
+       if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
+           pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
+           pending_find(MGMT_OP_SET_LE, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+                                     MGMT_STATUS_BUSY);
                goto unlock;
+       }
 
-       if (!changed)
+       if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
+           !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
+                              cp->scan_rsp_len, false)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+                                     MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
+       }
 
-       err = new_options(hdev, sk);
+       INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);
 
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
-               mgmt_index_removed(hdev);
+       hdev->adv_instance.flags = flags;
+       hdev->adv_instance.adv_data_len = cp->adv_data_len;
+       hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
 
-               if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
-                       set_bit(HCI_CONFIG, &hdev->dev_flags);
-                       set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+       if (cp->adv_data_len)
+               memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
 
-                       queue_work(hdev->req_workqueue, &hdev->power_on);
-               } else {
-                       set_bit(HCI_RAW, &hdev->flags);
-                       mgmt_index_added(hdev);
-               }
-       }
+       if (cp->scan_rsp_len)
+               memcpy(hdev->adv_instance.scan_rsp_data,
+                      cp->data + cp->adv_data_len, cp->scan_rsp_len);
 
-unlock:
-       hci_dev_unlock(hdev);
-       return err;
-}
+       if (hdev->adv_instance.timeout)
+               cancel_delayed_work(&hdev->adv_instance.timeout_exp);
 
-static int set_public_address(struct sock *sk, struct hci_dev *hdev,
-                             void *data, u16 len)
-{
-       struct mgmt_cp_set_public_address *cp = data;
-       bool changed;
-       int err;
+       hdev->adv_instance.timeout = timeout;
 
-       BT_DBG("%s", hdev->name);
+       if (timeout)
+               queue_delayed_work(hdev->workqueue,
+                                  &hdev->adv_instance.timeout_exp,
+                                  msecs_to_jiffies(timeout * 1000));
 
-       if (hdev_is_powered(hdev))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
-                                 MGMT_STATUS_REJECTED);
+       if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
+               advertising_added(sk, hdev, 1);
 
-       if (!bacmp(&cp->bdaddr, BDADDR_ANY))
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
-                                 MGMT_STATUS_INVALID_PARAMS);
+       /* If the HCI_ADVERTISING flag is set or the device isn't powered then
+        * we have no HCI communication to make. Simply return.
+        */
+       if (!hdev_is_powered(hdev) ||
+           hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
+               rp.instance = 0x01;
+               err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+                                       MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+               goto unlock;
+       }
 
-       if (!hdev->set_bdaddr)
-               return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+       /* We're good to go, update advertising data, parameters, and start
+        * advertising.
+        */
+       cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
+                              data_len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto unlock;
+       }
 
-       hci_dev_lock(hdev);
+       hci_req_init(&req, hdev);
 
-       changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
-       bacpy(&hdev->public_addr, &cp->bdaddr);
+       update_adv_data(&req);
+       update_scan_rsp_data(&req);
+       enable_advertising(&req);
 
-       err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
+       err = hci_req_run(&req, add_advertising_complete);
        if (err < 0)
-               goto unlock;
+               mgmt_pending_remove(cmd);
 
-       if (!changed)
-               goto unlock;
+unlock:
+       hci_dev_unlock(hdev);
 
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
-               err = new_options(hdev, sk);
+       return err;
+}
 
-       if (is_configured(hdev)) {
-               mgmt_index_removed(hdev);
+static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
+                                       u16 opcode)
+{
+       struct mgmt_pending_cmd *cmd;
+       struct mgmt_rp_remove_advertising rp;
 
-               clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
+       BT_DBG("status %d", status);
 
-               set_bit(HCI_CONFIG, &hdev->dev_flags);
-               set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+       hci_dev_lock(hdev);
 
-               queue_work(hdev->req_workqueue, &hdev->power_on);
-       }
+       /* A failure status here only means that we failed to disable
+        * advertising. Otherwise, the advertising instance has been removed,
+        * so report success.
+        */
+       cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
+       if (!cmd)
+               goto unlock;
+
+       rp.instance = 1;
+
+       mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
+                         &rp, sizeof(rp));
+       mgmt_pending_remove(cmd);
 
 unlock:
        hci_dev_unlock(hdev);
-       return err;
 }
 
-static const struct mgmt_handler {
-       int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
-                    u16 data_len);
-       bool var_len;
-       size_t data_len;
-} mgmt_handlers[] = {
-       { NULL }, /* 0x0000 (no command) */
-       { read_version,           false, MGMT_READ_VERSION_SIZE },
-       { read_commands,          false, MGMT_READ_COMMANDS_SIZE },
-       { read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
-       { read_controller_info,   false, MGMT_READ_INFO_SIZE },
-       { set_powered,            false, MGMT_SETTING_SIZE },
-       { set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
-       { set_connectable,        false, MGMT_SETTING_SIZE },
-       { set_fast_connectable,   false, MGMT_SETTING_SIZE },
-       { set_bondable,           false, MGMT_SETTING_SIZE },
-       { set_link_security,      false, MGMT_SETTING_SIZE },
-       { set_ssp,                false, MGMT_SETTING_SIZE },
-       { set_hs,                 false, MGMT_SETTING_SIZE },
-       { set_le,                 false, MGMT_SETTING_SIZE },
-       { set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
-       { set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
-       { add_uuid,               false, MGMT_ADD_UUID_SIZE },
-       { remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
-       { load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
-       { load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
-       { disconnect,             false, MGMT_DISCONNECT_SIZE },
-       { get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
-       { pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
-       { pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
-       { set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
-       { pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
-       { cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
-       { unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
-       { user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
-       { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
-       { user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
-       { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
-       { read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
-       { add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
-       { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
-       { start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
-       { stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
-       { confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
-       { block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
-       { unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
-       { set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
-       { set_advertising,        false, MGMT_SETTING_SIZE },
-       { set_bredr,              false, MGMT_SETTING_SIZE },
-       { set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
-       { set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
-       { set_secure_conn,        false, MGMT_SETTING_SIZE },
-       { set_debug_keys,         false, MGMT_SETTING_SIZE },
-       { set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
-       { load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
-       { get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
-       { get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
-       { add_device,             false, MGMT_ADD_DEVICE_SIZE },
-       { remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
-       { load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
-       { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
-       { read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
-       { set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
-       { set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
-       { start_service_discovery,true,  MGMT_START_SERVICE_DISCOVERY_SIZE },
-};
-
-int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
+static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
+                             void *data, u16 data_len)
 {
-       void *buf;
-       u8 *cp;
-       struct mgmt_hdr *hdr;
-       u16 opcode, index, len;
-       struct hci_dev *hdev = NULL;
-       const struct mgmt_handler *handler;
+       struct mgmt_cp_remove_advertising *cp = data;
+       struct mgmt_rp_remove_advertising rp;
        int err;
+       struct mgmt_pending_cmd *cmd;
+       struct hci_request req;
 
-       BT_DBG("got %zu bytes", msglen);
+       BT_DBG("%s", hdev->name);
 
-       if (msglen < sizeof(*hdr))
-               return -EINVAL;
+       /* The current implementation only allows modifying instance no 1. A
+        * value of 0 indicates that all instances should be cleared.
+        */
+       if (cp->instance > 1)
+               return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
+                                      MGMT_STATUS_INVALID_PARAMS);
 
-       buf = kmalloc(msglen, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
+       hci_dev_lock(hdev);
 
-       if (memcpy_from_msg(buf, msg, msglen)) {
-               err = -EFAULT;
-               goto done;
+       if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
+           pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
+           pending_find(MGMT_OP_SET_LE, hdev)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
+                                     MGMT_STATUS_BUSY);
+               goto unlock;
        }
 
-       hdr = buf;
-       opcode = __le16_to_cpu(hdr->opcode);
-       index = __le16_to_cpu(hdr->index);
-       len = __le16_to_cpu(hdr->len);
-
-       if (len != msglen - sizeof(*hdr)) {
-               err = -EINVAL;
-               goto done;
+       if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
+               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
+                                     MGMT_STATUS_INVALID_PARAMS);
+               goto unlock;
        }
 
-       if (index != MGMT_INDEX_NONE) {
-               hdev = hci_dev_get(index);
-               if (!hdev) {
-                       err = cmd_status(sk, index, opcode,
-                                        MGMT_STATUS_INVALID_INDEX);
-                       goto done;
-               }
-
-               if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
-                   test_bit(HCI_CONFIG, &hdev->dev_flags) ||
-                   test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
-                       err = cmd_status(sk, index, opcode,
-                                        MGMT_STATUS_INVALID_INDEX);
-                       goto done;
-               }
+       if (hdev->adv_instance.timeout)
+               cancel_delayed_work(&hdev->adv_instance.timeout_exp);
 
-               if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
-                   opcode != MGMT_OP_READ_CONFIG_INFO &&
-                   opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
-                   opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
-                       err = cmd_status(sk, index, opcode,
-                                        MGMT_STATUS_INVALID_INDEX);
-                       goto done;
-               }
-       }
+       memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
 
-       if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
-           mgmt_handlers[opcode].func == NULL) {
-               BT_DBG("Unknown op %u", opcode);
-               err = cmd_status(sk, index, opcode,
-                                MGMT_STATUS_UNKNOWN_COMMAND);
-               goto done;
-       }
+       advertising_removed(sk, hdev, 1);
 
-       if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
-                    opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
-               err = cmd_status(sk, index, opcode,
-                                MGMT_STATUS_INVALID_INDEX);
-               goto done;
-       }
+       hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
 
-       if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
-                     opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
-               err = cmd_status(sk, index, opcode,
-                                MGMT_STATUS_INVALID_INDEX);
-               goto done;
+       /* If the HCI_ADVERTISING flag is set or the device isn't powered then
+        * we have no HCI communication to make. Simply return.
+        */
+       if (!hdev_is_powered(hdev) ||
+           hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
+               rp.instance = 1;
+               err = mgmt_cmd_complete(sk, hdev->id,
+                                       MGMT_OP_REMOVE_ADVERTISING,
+                                       MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+               goto unlock;
        }
 
-       handler = &mgmt_handlers[opcode];
-
-       if ((handler->var_len && len < handler->data_len) ||
-           (!handler->var_len && len != handler->data_len)) {
-               err = cmd_status(sk, index, opcode,
-                                MGMT_STATUS_INVALID_PARAMS);
-               goto done;
+       cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
+                              data_len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto unlock;
        }
 
-       if (hdev)
-               mgmt_init_hdev(sk, hdev);
-
-       cp = buf + sizeof(*hdr);
+       hci_req_init(&req, hdev);
+       disable_advertising(&req);
 
-       err = handler->func(sk, hdev, cp, len);
+       err = hci_req_run(&req, remove_advertising_complete);
        if (err < 0)
-               goto done;
-
-       err = msglen;
+               mgmt_pending_remove(cmd);
 
-done:
-       if (hdev)
-               hci_dev_put(hdev);
+unlock:
+       hci_dev_unlock(hdev);
 
-       kfree(buf);
        return err;
 }
 
+static const struct hci_mgmt_handler mgmt_handlers[] = {
+       { NULL }, /* 0x0000 (no command) */
+       { read_version,            MGMT_READ_VERSION_SIZE,
+                                               HCI_MGMT_NO_HDEV |
+                                               HCI_MGMT_UNTRUSTED },
+       { read_commands,           MGMT_READ_COMMANDS_SIZE,
+                                               HCI_MGMT_NO_HDEV |
+                                               HCI_MGMT_UNTRUSTED },
+       { read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
+                                               HCI_MGMT_NO_HDEV |
+                                               HCI_MGMT_UNTRUSTED },
+       { read_controller_info,    MGMT_READ_INFO_SIZE,
+                                               HCI_MGMT_UNTRUSTED },
+       { set_powered,             MGMT_SETTING_SIZE },
+       { set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
+       { set_connectable,         MGMT_SETTING_SIZE },
+       { set_fast_connectable,    MGMT_SETTING_SIZE },
+       { set_bondable,            MGMT_SETTING_SIZE },
+       { set_link_security,       MGMT_SETTING_SIZE },
+       { set_ssp,                 MGMT_SETTING_SIZE },
+       { set_hs,                  MGMT_SETTING_SIZE },
+       { set_le,                  MGMT_SETTING_SIZE },
+       { set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
+       { set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
+       { add_uuid,                MGMT_ADD_UUID_SIZE },
+       { remove_uuid,             MGMT_REMOVE_UUID_SIZE },
+       { load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
+                                               HCI_MGMT_VAR_LEN },
+       { load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
+                                               HCI_MGMT_VAR_LEN },
+       { disconnect,              MGMT_DISCONNECT_SIZE },
+       { get_connections,         MGMT_GET_CONNECTIONS_SIZE },
+       { pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
+       { pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
+       { set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
+       { pair_device,             MGMT_PAIR_DEVICE_SIZE },
+       { cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
+       { unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
+       { user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
+       { user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
+       { user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
+       { user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
+       { read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
+       { add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
+                                               HCI_MGMT_VAR_LEN },
+       { remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
+       { start_discovery,         MGMT_START_DISCOVERY_SIZE },
+       { stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
+       { confirm_name,            MGMT_CONFIRM_NAME_SIZE },
+       { block_device,            MGMT_BLOCK_DEVICE_SIZE },
+       { unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
+       { set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
+       { set_advertising,         MGMT_SETTING_SIZE },
+       { set_bredr,               MGMT_SETTING_SIZE },
+       { set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
+       { set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
+       { set_secure_conn,         MGMT_SETTING_SIZE },
+       { set_debug_keys,          MGMT_SETTING_SIZE },
+       { set_privacy,             MGMT_SET_PRIVACY_SIZE },
+       { load_irks,               MGMT_LOAD_IRKS_SIZE,
+                                               HCI_MGMT_VAR_LEN },
+       { get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
+       { get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
+       { add_device,              MGMT_ADD_DEVICE_SIZE },
+       { remove_device,           MGMT_REMOVE_DEVICE_SIZE },
+       { load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
+                                               HCI_MGMT_VAR_LEN },
+       { read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
+                                               HCI_MGMT_NO_HDEV |
+                                               HCI_MGMT_UNTRUSTED },
+       { read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
+                                               HCI_MGMT_UNCONFIGURED |
+                                               HCI_MGMT_UNTRUSTED },
+       { set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
+                                               HCI_MGMT_UNCONFIGURED },
+       { set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
+                                               HCI_MGMT_UNCONFIGURED },
+       { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
+                                               HCI_MGMT_VAR_LEN },
+       { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
+       { read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
+                                               HCI_MGMT_NO_HDEV |
+                                               HCI_MGMT_UNTRUSTED },
+       { read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
+       { add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
+                                               HCI_MGMT_VAR_LEN },
+       { remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
+};
+
 void mgmt_index_added(struct hci_dev *hdev)
 {
-       if (hdev->dev_type != HCI_BREDR)
-               return;
+       struct mgmt_ev_ext_index ev;
 
        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return;
 
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
-               mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
-       else
-               mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
+       switch (hdev->dev_type) {
+       case HCI_BREDR:
+               if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+                       mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
+                                        NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
+                       ev.type = 0x01;
+               } else {
+                       mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
+                                        HCI_MGMT_INDEX_EVENTS);
+                       ev.type = 0x00;
+               }
+               break;
+       case HCI_AMP:
+               ev.type = 0x02;
+               break;
+       default:
+               return;
+       }
+
+       ev.bus = hdev->bus;
+
+       mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
+                        HCI_MGMT_EXT_INDEX_EVENTS);
 }
 
 void mgmt_index_removed(struct hci_dev *hdev)
 {
+       struct mgmt_ev_ext_index ev;
        u8 status = MGMT_STATUS_INVALID_INDEX;
 
-       if (hdev->dev_type != HCI_BREDR)
+       if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return;
 
-       if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+       switch (hdev->dev_type) {
+       case HCI_BREDR:
+               mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
+
+               if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+                       mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
+                                        NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
+                       ev.type = 0x01;
+               } else {
+                       mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
+                                        HCI_MGMT_INDEX_EVENTS);
+                       ev.type = 0x00;
+               }
+               break;
+       case HCI_AMP:
+               ev.type = 0x02;
+               break;
+       default:
                return;
+       }
 
-       mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
+       ev.bus = hdev->bus;
 
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
-               mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
-       else
-               mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
+       mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
+                        HCI_MGMT_EXT_INDEX_EVENTS);
 }
 
 /* This function requires the caller holds hdev->lock */
@@ -6367,7 +7215,7 @@ static int powered_update_hci(struct hci_dev *hdev)
 
        hci_req_init(&req, hdev);
 
-       if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+       if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            !lmp_host_ssp_capable(hdev)) {
                u8 mode = 0x01;
 
@@ -6381,7 +7229,7 @@ static int powered_update_hci(struct hci_dev *hdev)
                }
        }
 
-       if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+       if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
            lmp_bredr_capable(hdev)) {
                struct hci_cp_write_le_host_supported cp;
 
@@ -6402,24 +7250,28 @@ static int powered_update_hci(struct hci_dev *hdev)
                 * advertising data. This also applies to the case
                 * where BR/EDR was toggled during the AUTO_OFF phase.
                 */
-               if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+               if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                        update_adv_data(&req);
                        update_scan_rsp_data(&req);
                }
 
-               if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+               if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+                   hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
                        enable_advertising(&req);
 
                restart_le_actions(&req);
        }
 
-       link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+       link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
                hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
                            sizeof(link_sec), &link_sec);
 
        if (lmp_bredr_capable(hdev)) {
-               write_fast_connectable(&req, false);
+               if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
+                       write_fast_connectable(&req, true);
+               else
+                       write_fast_connectable(&req, false);
                __hci_update_page_scan(&req);
                update_class(&req);
                update_name(&req);
@@ -6435,7 +7287,7 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
        u8 status, zero_cod[] = { 0, 0, 0 };
        int err;
 
-       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return 0;
 
        if (powered) {
@@ -6456,7 +7308,7 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
         * been triggered, potentially causing misleading DISCONNECTED
         * status responses.
         */
-       if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
                status = MGMT_STATUS_INVALID_INDEX;
        else
                status = MGMT_STATUS_NOT_POWERED;
@@ -6464,8 +7316,8 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
        mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
 
        if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
-               mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
-                          zero_cod, sizeof(zero_cod), NULL);
+               mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
+                                  zero_cod, sizeof(zero_cod), NULL);
 
 new_settings:
        err = new_settings(hdev, match.sk);
@@ -6478,10 +7330,10 @@ new_settings:
 
 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        u8 status;
 
-       cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
+       cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
        if (!cmd)
                return;
 
@@ -6490,7 +7342,7 @@ void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
        else
                status = MGMT_STATUS_FAILED;
 
-       cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
+       mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
 
        mgmt_pending_remove(cmd);
 }
@@ -6506,17 +7358,23 @@ void mgmt_discoverable_timeout(struct hci_dev *hdev)
         * of a timeout triggered from general discoverable, it is
         * safe to unconditionally clear the flag.
         */
-       clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
-       clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+       hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+       hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
 
        hci_req_init(&req, hdev);
-       if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                u8 scan = SCAN_PAGE;
                hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
                            sizeof(scan), &scan);
        }
        update_class(&req);
-       update_adv_data(&req);
+
+       /* Advertising instances don't use the global discoverable setting, so
+        * only update AD if advertising was enabled using Set Advertising.
+        */
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
+               update_adv_data(&req);
+
        hci_req_run(&req, NULL);
 
        hdev->discov_timeout = 0;
@@ -6654,7 +7512,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
 
        bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
        ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
-       ev.key.master = csrk->master;
+       ev.key.type = csrk->type;
        memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
 
        mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
@@ -6681,17 +7539,6 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
        mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
 }
 
-static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
-                                 u8 data_len)
-{
-       eir[eir_len++] = sizeof(type) + data_len;
-       eir[eir_len++] = type;
-       memcpy(&eir[eir_len], data, data_len);
-       eir_len += data_len;
-
-       return eir_len;
-}
-
 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
                           u32 flags, u8 *name, u8 name_len)
 {
@@ -6729,7 +7576,7 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
                    sizeof(*ev) + eir_len, NULL);
 }
 
-static void disconnect_rsp(struct pending_cmd *cmd, void *data)
+static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
 {
        struct sock **sk = data;
 
@@ -6741,7 +7588,7 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
        mgmt_pending_remove(cmd);
 }
 
-static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
+static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
 {
        struct hci_dev *hdev = data;
        struct mgmt_cp_unpair_device *cp = cmd->param;
@@ -6754,10 +7601,10 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
 
 bool mgmt_powering_down(struct hci_dev *hdev)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        struct mgmt_mode *cp;
 
-       cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
+       cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
        if (!cmd)
                return false;
 
@@ -6809,12 +7656,12 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
 {
        u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
        struct mgmt_cp_disconnect *cp;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
                             hdev);
 
-       cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
+       cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
        if (!cmd)
                return;
 
@@ -6864,9 +7711,9 @@ void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                  u8 status)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
-       cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
+       cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
        if (!cmd)
                return;
 
@@ -6877,9 +7724,9 @@ void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                      u8 status)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
-       cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
+       cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
        if (!cmd)
                return;
 
@@ -6922,9 +7769,9 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                      u8 link_type, u8 addr_type, u8 status,
                                      u8 opcode)
 {
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
-       cmd = mgmt_pending_find(opcode, hdev);
+       cmd = pending_find(opcode, hdev);
        if (!cmd)
                return -ENOENT;
 
@@ -6983,7 +7830,7 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
 {
        struct mgmt_ev_auth_failed ev;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
        u8 status = mgmt_status(hci_status);
 
        bacpy(&ev.addr.bdaddr, &conn->dst);
@@ -7014,11 +7861,9 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
        }
 
        if (test_bit(HCI_AUTH, &hdev->flags))
-               changed = !test_and_set_bit(HCI_LINK_SECURITY,
-                                           &hdev->dev_flags);
+               changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
        else
-               changed = test_and_clear_bit(HCI_LINK_SECURITY,
-                                            &hdev->dev_flags);
+               changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
 
        mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
                             &match);
@@ -7054,9 +7899,9 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
        if (status) {
                u8 mgmt_err = mgmt_status(status);
 
-               if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
-                                                &hdev->dev_flags)) {
-                       clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+               if (enable && hci_dev_test_and_clear_flag(hdev,
+                                                         HCI_SSP_ENABLED)) {
+                       hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
                        new_settings(hdev, NULL);
                }
 
@@ -7066,14 +7911,14 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
        }
 
        if (enable) {
-               changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+               changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
        } else {
-               changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+               changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
                if (!changed)
-                       changed = test_and_clear_bit(HCI_HS_ENABLED,
-                                                    &hdev->dev_flags);
+                       changed = hci_dev_test_and_clear_flag(hdev,
+                                                             HCI_HS_ENABLED);
                else
-                       clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+                       hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
        }
 
        mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
@@ -7086,8 +7931,8 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
 
        hci_req_init(&req, hdev);
 
-       if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
-               if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
+               if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
                        hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
                                    sizeof(enable), &enable);
                update_eir(&req);
@@ -7098,7 +7943,7 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
        hci_req_run(&req, NULL);
 }
 
-static void sk_lookup(struct pending_cmd *cmd, void *data)
+static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
 {
        struct cmd_lookup *match = data;
 
@@ -7118,8 +7963,8 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
        mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
 
        if (!status)
-               mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
-                          NULL);
+               mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
+                                  dev_class, 3, NULL);
 
        if (match.sk)
                sock_put(match.sk);
@@ -7128,7 +7973,7 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
 {
        struct mgmt_cp_set_local_name ev;
-       struct pending_cmd *cmd;
+       struct mgmt_pending_cmd *cmd;
 
        if (status)
                return;
@@ -7137,55 +7982,19 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
        memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
        memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
 
-       cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
+       cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
        if (!cmd) {
                memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
 
                /* If this is a HCI command related to powering on the
                 * HCI dev don't send any mgmt signals.
                 */
-               if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
+               if (pending_find(MGMT_OP_SET_POWERED, hdev))
                        return;
        }
 
-       mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
-                  cmd ? cmd->sk : NULL);
-}
-
-void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
-                                      u8 *rand192, u8 *hash256, u8 *rand256,
-                                      u8 status)
-{
-       struct pending_cmd *cmd;
-
-       BT_DBG("%s status %u", hdev->name, status);
-
-       cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
-       if (!cmd)
-               return;
-
-       if (status) {
-               cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
-                          mgmt_status(status));
-       } else {
-               struct mgmt_rp_read_local_oob_data rp;
-               size_t rp_size = sizeof(rp);
-
-               memcpy(rp.hash192, hash192, sizeof(rp.hash192));
-               memcpy(rp.rand192, rand192, sizeof(rp.rand192));
-
-               if (bredr_sc_enabled(hdev) && hash256 && rand256) {
-                       memcpy(rp.hash256, hash256, sizeof(rp.hash256));
-                       memcpy(rp.rand256, rand256, sizeof(rp.rand256));
-               } else {
-                       rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
-               }
-
-               cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
-                            &rp, rp_size);
-       }
-
-       mgmt_pending_remove(cmd);
+       mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
+                          cmd ? cmd->sk : NULL);
 }
 
 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
@@ -7258,7 +8067,7 @@ static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
 static void restart_le_scan(struct hci_dev *hdev)
 {
        /* If controller is not scanning we are done. */
-       if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;
 
        if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
@@ -7270,14 +8079,58 @@ static void restart_le_scan(struct hci_dev *hdev)
                           DISCOV_LE_RESTART_DELAY);
 }
 
+static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
+                           u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
+{
+       /* If a RSSI threshold has been specified, and
+        * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
+        * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
+        * is set, let it through for further processing, as we might need to
+        * restart the scan.
+        *
+        * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
+        * the results are also dropped.
+        */
+       if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
+           (rssi == HCI_RSSI_INVALID ||
+           (rssi < hdev->discovery.rssi &&
+            !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
+               return  false;
+
+       if (hdev->discovery.uuid_count != 0) {
+               /* If a list of UUIDs is provided in filter, results with no
+                * matching UUID should be dropped.
+                */
+               if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
+                                  hdev->discovery.uuids) &&
+                   !eir_has_uuids(scan_rsp, scan_rsp_len,
+                                  hdev->discovery.uuid_count,
+                                  hdev->discovery.uuids))
+                       return false;
+       }
+
+       /* If duplicate filtering does not report RSSI changes, then restart
+        * scanning to ensure updated result with updated RSSI values.
+        */
+       if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
+               restart_le_scan(hdev);
+
+               /* Validate RSSI value against the RSSI threshold once more. */
+               if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
+                   rssi < hdev->discovery.rssi)
+                       return false;
+       }
+
+       return true;
+}
+
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
                       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
 {
        char buf[512];
-       struct mgmt_ev_device_found *ev = (void *) buf;
+       struct mgmt_ev_device_found *ev = (void *)buf;
        size_t ev_size;
-       bool match;
 
        /* Don't send events for a non-kernel initiated discovery. With
         * LE one exception is if we have pend_le_reports > 0 in which
@@ -7290,21 +8143,12 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                        return;
        }
 
-       /* When using service discovery with a RSSI threshold, then check
-        * if such a RSSI threshold is specified. If a RSSI threshold has
-        * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
-        * then all results with a RSSI smaller than the RSSI threshold will be
-        * dropped. If the quirk is set, let it through for further processing,
-        * as we might need to restart the scan.
-        *
-        * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
-        * the results are also dropped.
-        */
-       if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
-           (rssi == HCI_RSSI_INVALID ||
-           (rssi < hdev->discovery.rssi &&
-            !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
-               return;
+       if (hdev->discovery.result_filtering) {
+               /* We are using service discovery */
+               if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
+                                    scan_rsp_len))
+                       return;
+       }
 
        /* Make sure that the buffer is big enough. The 5 extra bytes
         * are for the potential CoD field.
@@ -7331,87 +8175,17 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
        ev->rssi = rssi;
        ev->flags = cpu_to_le32(flags);
 
-       if (eir_len > 0) {
-               /* When using service discovery and a list of UUID is
-                * provided, results with no matching UUID should be
-                * dropped. In case there is a match the result is
-                * kept and checking possible scan response data
-                * will be skipped.
-                */
-               if (hdev->discovery.uuid_count > 0) {
-                       match = eir_has_uuids(eir, eir_len,
-                                             hdev->discovery.uuid_count,
-                                             hdev->discovery.uuids);
-                       /* If duplicate filtering does not report RSSI changes,
-                        * then restart scanning to ensure updated result with
-                        * updated RSSI values.
-                        */
-                       if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
-                                             &hdev->quirks))
-                               restart_le_scan(hdev);
-               } else {
-                       match = true;
-               }
-
-               if (!match && !scan_rsp_len)
-                       return;
-
+       if (eir_len > 0)
                /* Copy EIR or advertising data into event */
                memcpy(ev->eir, eir, eir_len);
-       } else {
-               /* When using service discovery and a list of UUID is
-                * provided, results with empty EIR or advertising data
-                * should be dropped since they do not match any UUID.
-                */
-               if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
-                       return;
-
-               match = false;
-       }
 
        if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
                eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
                                          dev_class, 3);
 
-       if (scan_rsp_len > 0) {
-               /* When using service discovery and a list of UUID is
-                * provided, results with no matching UUID should be
-                * dropped if there is no previous match from the
-                * advertising data.
-                */
-               if (hdev->discovery.uuid_count > 0) {
-                       if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
-                                                    hdev->discovery.uuid_count,
-                                                    hdev->discovery.uuids))
-                               return;
-
-                       /* If duplicate filtering does not report RSSI changes,
-                        * then restart scanning to ensure updated result with
-                        * updated RSSI values.
-                        */
-                       if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
-                                    &hdev->quirks))
-                               restart_le_scan(hdev);
-               }
-
+       if (scan_rsp_len > 0)
                /* Append scan response data to event */
                memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
-       } else {
-               /* When using service discovery and a list of UUID is
-                * provided, results with empty scan response and no
-                * previous matched advertising data should be dropped.
-                */
-               if (hdev->discovery.uuid_count > 0 && !match)
-                       return;
-       }
-
-       /* Validate the reported RSSI value against the RSSI threshold once more
-        * incase HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
-        * scanning.
-        */
-       if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
-           rssi < hdev->discovery.rssi)
-               return;
 
        ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
        ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
@@ -7464,10 +8238,28 @@ void mgmt_reenable_advertising(struct hci_dev *hdev)
 {
        struct hci_request req;
 
-       if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+           !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
                return;
 
        hci_req_init(&req, hdev);
        enable_advertising(&req);
        hci_req_run(&req, adv_enable_complete);
 }
+
+static struct hci_mgmt_chan chan = {
+       .channel        = HCI_CHANNEL_CONTROL,
+       .handler_count  = ARRAY_SIZE(mgmt_handlers),
+       .handlers       = mgmt_handlers,
+       .hdev_init      = mgmt_init_hdev,
+};
+
+int mgmt_init(void)
+{
+       return hci_mgmt_chan_register(&chan);
+}
+
+void mgmt_exit(void)
+{
+       hci_mgmt_chan_unregister(&chan);
+}
diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
new file mode 100644 (file)
index 0000000..8c30c7e
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+
+   Copyright (C) 2015  Intel Corporation
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/mgmt.h>
+
+#include "mgmt_util.h"
+
+int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
+                   void *data, u16 data_len, int flag, struct sock *skip_sk)
+{
+       struct sk_buff *skb;
+       struct mgmt_hdr *hdr;
+
+       skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       hdr = (void *) skb_put(skb, sizeof(*hdr));
+       hdr->opcode = cpu_to_le16(event);
+       if (hdev)
+               hdr->index = cpu_to_le16(hdev->id);
+       else
+               hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
+       hdr->len = cpu_to_le16(data_len);
+
+       if (data)
+               memcpy(skb_put(skb, data_len), data, data_len);
+
+       /* Time stamp */
+       __net_timestamp(skb);
+
+       hci_send_to_channel(channel, skb, flag, skip_sk);
+       kfree_skb(skb);
+
+       return 0;
+}
+
+int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
+{
+       struct sk_buff *skb;
+       struct mgmt_hdr *hdr;
+       struct mgmt_ev_cmd_status *ev;
+       int err;
+
+       BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
+
+       skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       hdr = (void *) skb_put(skb, sizeof(*hdr));
+
+       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
+       hdr->index = cpu_to_le16(index);
+       hdr->len = cpu_to_le16(sizeof(*ev));
+
+       ev = (void *) skb_put(skb, sizeof(*ev));
+       ev->status = status;
+       ev->opcode = cpu_to_le16(cmd);
+
+       err = sock_queue_rcv_skb(sk, skb);
+       if (err < 0)
+               kfree_skb(skb);
+
+       return err;
+}
+
+int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
+                     void *rp, size_t rp_len)
+{
+       struct sk_buff *skb;
+       struct mgmt_hdr *hdr;
+       struct mgmt_ev_cmd_complete *ev;
+       int err;
+
+       BT_DBG("sock %p", sk);
+
+       skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       hdr = (void *) skb_put(skb, sizeof(*hdr));
+
+       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
+       hdr->index = cpu_to_le16(index);
+       hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
+
+       ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
+       ev->opcode = cpu_to_le16(cmd);
+       ev->status = status;
+
+       if (rp)
+               memcpy(ev->data, rp, rp_len);
+
+       err = sock_queue_rcv_skb(sk, skb);
+       if (err < 0)
+               kfree_skb(skb);
+
+       return err;
+}
+
+struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode,
+                                          struct hci_dev *hdev)
+{
+       struct mgmt_pending_cmd *cmd;
+
+       list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+               if (hci_sock_get_channel(cmd->sk) != channel)
+                       continue;
+               if (cmd->opcode == opcode)
+                       return cmd;
+       }
+
+       return NULL;
+}
+
+struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel,
+                                               u16 opcode,
+                                               struct hci_dev *hdev,
+                                               const void *data)
+{
+       struct mgmt_pending_cmd *cmd;
+
+       list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+               if (cmd->user_data != data)
+                       continue;
+               if (cmd->opcode == opcode)
+                       return cmd;
+       }
+
+       return NULL;
+}
+
+void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
+                         void (*cb)(struct mgmt_pending_cmd *cmd, void *data),
+                         void *data)
+{
+       struct mgmt_pending_cmd *cmd, *tmp;
+
+       list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
+               if (opcode > 0 && cmd->opcode != opcode)
+                       continue;
+
+               cb(cmd, data);
+       }
+}
+
+struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+                                         struct hci_dev *hdev,
+                                         void *data, u16 len)
+{
+       struct mgmt_pending_cmd *cmd;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd)
+               return NULL;
+
+       cmd->opcode = opcode;
+       cmd->index = hdev->id;
+
+       cmd->param = kmemdup(data, len, GFP_KERNEL);
+       if (!cmd->param) {
+               kfree(cmd);
+               return NULL;
+       }
+
+       cmd->param_len = len;
+
+       cmd->sk = sk;
+       sock_hold(sk);
+
+       list_add(&cmd->list, &hdev->mgmt_pending);
+
+       return cmd;
+}
+
+void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
+{
+       sock_put(cmd->sk);
+       kfree(cmd->param);
+       kfree(cmd);
+}
+
+void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
+{
+       list_del(&cmd->list);
+       mgmt_pending_free(cmd);
+}
diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h
new file mode 100644 (file)
index 0000000..6559f18
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+   Copyright (C) 2015  Intel Corporation
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+struct mgmt_pending_cmd {
+       struct list_head list;
+       u16 opcode;
+       int index;
+       void *param;
+       size_t param_len;
+       struct sock *sk;
+       void *user_data;
+       int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
+};
+
+int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
+                   void *data, u16 data_len, int flag, struct sock *skip_sk);
+int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status);
+int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
+                     void *rp, size_t rp_len);
+
+struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode,
+                                          struct hci_dev *hdev);
+struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel,
+                                               u16 opcode,
+                                               struct hci_dev *hdev,
+                                               const void *data);
+void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
+                         void (*cb)(struct mgmt_pending_cmd *cmd, void *data),
+                         void *data);
+struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+                                         struct hci_dev *hdev,
+                                         void *data, u16 len);
+void mgmt_pending_free(struct mgmt_pending_cmd *cmd);
+void mgmt_pending_remove(struct mgmt_pending_cmd *cmd);
index 3c6d2c8ac1a47bc7f5a96b576ca93e9542ee4fe3..825e8fb5114b16a5276bf7c6ee251883c6f0bfd2 100644 (file)
@@ -549,8 +549,8 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
        return 0;
 }
 
-static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-                              struct msghdr *msg, size_t len)
+static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+                              size_t len)
 {
        struct sock *sk = sock->sk;
        struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
@@ -615,8 +615,8 @@ done:
        return sent;
 }
 
-static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-                              struct msghdr *msg, size_t size, int flags)
+static int rfcomm_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+                              size_t size, int flags)
 {
        struct sock *sk = sock->sk;
        struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
@@ -627,7 +627,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                return 0;
        }
 
-       len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags);
+       len = bt_sock_stream_recvmsg(sock, msg, size, flags);
 
        lock_sock(sk);
        if (!(flags & MSG_PEEK) && len > 0)
index 76321b546e8426146dba242c281c2915464e7d6f..4322c833e74891d20a627f21e64cdedd67cd806f 100644 (file)
@@ -688,8 +688,8 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len
        return 0;
 }
 
-static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *msg, size_t len)
+static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+                           size_t len)
 {
        struct sock *sk = sock->sk;
        int err;
@@ -758,8 +758,8 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
        }
 }
 
-static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *msg, size_t len, int flags)
+static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+                           size_t len, int flags)
 {
        struct sock *sk = sock->sk;
        struct sco_pinfo *pi = sco_pi(sk);
@@ -777,7 +777,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        release_sock(sk);
 
-       return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+       return bt_sock_recvmsg(sock, msg, len, flags);
 }
 
 static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
@@ -1083,9 +1083,13 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
        return lm;
 }
 
-void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
 {
+       if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+               return;
+
        BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+
        if (!status) {
                struct sco_conn *conn;
 
@@ -1096,8 +1100,11 @@ void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
                sco_conn_del(hcon, bt_to_errno(status));
 }
 
-void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
 {
+       if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+               return;
+
        BT_DBG("hcon %p reason %d", hcon, reason);
 
        sco_conn_del(hcon, bt_to_errno(reason));
@@ -1122,6 +1129,12 @@ drop:
        return 0;
 }
 
+static struct hci_cb sco_cb = {
+       .name           = "SCO",
+       .connect_cfm    = sco_connect_cfm,
+       .disconn_cfm    = sco_disconn_cfm,
+};
+
 static int sco_debugfs_show(struct seq_file *f, void *p)
 {
        struct sock *sk;
@@ -1203,6 +1216,8 @@ int __init sco_init(void)
 
        BT_INFO("SCO socket layer initialized");
 
+       hci_register_cb(&sco_cb);
+
        if (IS_ERR_OR_NULL(bt_debugfs))
                return 0;
 
@@ -1216,12 +1231,14 @@ error:
        return err;
 }
 
-void __exit sco_exit(void)
+void sco_exit(void)
 {
        bt_procfs_cleanup(&init_net, "sco");
 
        debugfs_remove(sco_debugfs);
 
+       hci_unregister_cb(&sco_cb);
+
        bt_sock_unregister(BTPROTO_SCO);
 
        proto_unregister(&sco_proto);
index 378f4064952cfd0fe954511e76d307a486118ca4..dc688f13e49612cd74decf8853b8c270803942f3 100644 (file)
@@ -21,6 +21,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
+#include <linux/debugfs.h>
+
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
@@ -154,6 +156,21 @@ static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32],
        return 0;
 }
 
+static char test_ecdh_buffer[32];
+
+static ssize_t test_ecdh_read(struct file *file, char __user *user_buf,
+                             size_t count, loff_t *ppos)
+{
+       return simple_read_from_buffer(user_buf, count, ppos, test_ecdh_buffer,
+                                      strlen(test_ecdh_buffer));
+}
+
+static const struct file_operations test_ecdh_fops = {
+       .open           = simple_open,
+       .read           = test_ecdh_read,
+       .llseek         = default_llseek,
+};
+
 static int __init test_ecdh(void)
 {
        ktime_t calltime, delta, rettime;
@@ -165,19 +182,19 @@ static int __init test_ecdh(void)
        err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1);
        if (err) {
                BT_ERR("ECDH sample 1 failed");
-               return err;
+               goto done;
        }
 
        err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2);
        if (err) {
                BT_ERR("ECDH sample 2 failed");
-               return err;
+               goto done;
        }
 
        err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3);
        if (err) {
                BT_ERR("ECDH sample 3 failed");
-               return err;
+               goto done;
        }
 
        rettime = ktime_get();
@@ -186,7 +203,17 @@ static int __init test_ecdh(void)
 
        BT_INFO("ECDH test passed in %llu usecs", duration);
 
-       return 0;
+done:
+       if (!err)
+               snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer),
+                        "PASS (%llu usecs)\n", duration);
+       else
+               snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), "FAIL\n");
+
+       debugfs_create_file("selftest_ecdh", 0444, bt_debugfs, NULL,
+                           &test_ecdh_fops);
+
+       return err;
 }
 
 #else
index c09a821f381d0b648b45ca5c722d89161775f638..1ab3dc9c8f99bf425a2a24403cfe6e54ddbbd550 100644 (file)
@@ -52,7 +52,7 @@
 
 #define SMP_TIMEOUT    msecs_to_jiffies(30000)
 
-#define AUTH_REQ_MASK(dev)     (test_bit(HCI_SC_ENABLED, &(dev)->dev_flags) ? \
+#define AUTH_REQ_MASK(dev)     (hci_dev_test_flag(dev, HCI_SC_ENABLED) ? \
                                 0x1f : 0x07)
 #define KEY_DIST_MASK          0x07
 
@@ -70,7 +70,19 @@ enum {
        SMP_FLAG_DEBUG_KEY,
        SMP_FLAG_WAIT_USER,
        SMP_FLAG_DHKEY_PENDING,
-       SMP_FLAG_OOB,
+       SMP_FLAG_REMOTE_OOB,
+       SMP_FLAG_LOCAL_OOB,
+};
+
+struct smp_dev {
+       /* Secure Connections OOB data */
+       u8                      local_pk[64];
+       u8                      local_sk[32];
+       u8                      local_rand[16];
+       bool                    debug_key;
+
+       struct crypto_blkcipher *tfm_aes;
+       struct crypto_hash      *tfm_cmac;
 };
 
 struct smp_chan {
@@ -84,7 +96,8 @@ struct smp_chan {
        u8              rrnd[16]; /* SMP Pairing Random (remote) */
        u8              pcnf[16]; /* SMP Pairing Confirm */
        u8              tk[16]; /* SMP Temporary Key */
-       u8              rr[16];
+       u8              rr[16]; /* Remote OOB ra/rb value */
+       u8              lr[16]; /* Local OOB ra/rb value */
        u8              enc_key_size;
        u8              remote_key_dist;
        bdaddr_t        id_addr;
@@ -478,18 +491,18 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
                     const bdaddr_t *bdaddr)
 {
        struct l2cap_chan *chan = hdev->smp_data;
-       struct crypto_blkcipher *tfm;
+       struct smp_dev *smp;
        u8 hash[3];
        int err;
 
        if (!chan || !chan->data)
                return false;
 
-       tfm = chan->data;
+       smp = chan->data;
 
        BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk);
 
-       err = smp_ah(tfm, irk, &bdaddr->b[3], hash);
+       err = smp_ah(smp->tfm_aes, irk, &bdaddr->b[3], hash);
        if (err)
                return false;
 
@@ -499,20 +512,20 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
 int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
 {
        struct l2cap_chan *chan = hdev->smp_data;
-       struct crypto_blkcipher *tfm;
+       struct smp_dev *smp;
        int err;
 
        if (!chan || !chan->data)
                return -EOPNOTSUPP;
 
-       tfm = chan->data;
+       smp = chan->data;
 
        get_random_bytes(&rpa->b[3], 3);
 
        rpa->b[5] &= 0x3f;      /* Clear two most significant bits */
        rpa->b[5] |= 0x40;      /* Set second most significant bit */
 
-       err = smp_ah(tfm, irk, &rpa->b[3], rpa->b);
+       err = smp_ah(smp->tfm_aes, irk, &rpa->b[3], rpa->b);
        if (err < 0)
                return err;
 
@@ -521,6 +534,53 @@ int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
        return 0;
 }
 
+int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16])
+{
+       struct l2cap_chan *chan = hdev->smp_data;
+       struct smp_dev *smp;
+       int err;
+
+       if (!chan || !chan->data)
+               return -EOPNOTSUPP;
+
+       smp = chan->data;
+
+       if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
+               BT_DBG("Using debug keys");
+               memcpy(smp->local_pk, debug_pk, 64);
+               memcpy(smp->local_sk, debug_sk, 32);
+               smp->debug_key = true;
+       } else {
+               while (true) {
+                       /* Generate local key pair for Secure Connections */
+                       if (!ecc_make_key(smp->local_pk, smp->local_sk))
+                               return -EIO;
+
+                       /* This is unlikely, but we need to check that
+                        * we didn't accidentally generate a debug key.
+                        */
+                       if (memcmp(smp->local_sk, debug_sk, 32))
+                               break;
+               }
+               smp->debug_key = false;
+       }
+
+       SMP_DBG("OOB Public Key X: %32phN", smp->local_pk);
+       SMP_DBG("OOB Public Key Y: %32phN", smp->local_pk + 32);
+       SMP_DBG("OOB Private Key:  %32phN", smp->local_sk);
+
+       get_random_bytes(smp->local_rand, 16);
+
+       err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->local_pk,
+                    smp->local_rand, 0, hash);
+       if (err < 0)
+               return err;
+
+       memcpy(rand, smp->local_rand, 16);
+
+       return 0;
+}
+
 static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
 {
        struct l2cap_chan *chan = conn->smp;
@@ -589,7 +649,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
        struct hci_dev *hdev = hcon->hdev;
        u8 local_dist = 0, remote_dist = 0, oob_flag = SMP_OOB_NOT_PRESENT;
 
-       if (test_bit(HCI_BONDABLE, &conn->hcon->hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_BONDABLE)) {
                local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
                remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
                authreq |= SMP_AUTH_BONDING;
@@ -597,18 +657,18 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
                authreq &= ~SMP_AUTH_BONDING;
        }
 
-       if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING))
                remote_dist |= SMP_DIST_ID_KEY;
 
-       if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                local_dist |= SMP_DIST_ID_KEY;
 
-       if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+       if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
            (authreq & SMP_AUTH_SC)) {
                struct oob_data *oob_data;
                u8 bdaddr_type;
 
-               if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+               if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        local_dist |= SMP_DIST_LINK_KEY;
                        remote_dist |= SMP_DIST_LINK_KEY;
                }
@@ -621,10 +681,12 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
                oob_data = hci_find_remote_oob_data(hdev, &hcon->dst,
                                                    bdaddr_type);
                if (oob_data && oob_data->present) {
-                       set_bit(SMP_FLAG_OOB, &smp->flags);
+                       set_bit(SMP_FLAG_REMOTE_OOB, &smp->flags);
                        oob_flag = SMP_OOB_PRESENT;
                        memcpy(smp->rr, oob_data->rand256, 16);
                        memcpy(smp->pcnf, oob_data->hash256, 16);
+                       SMP_DBG("OOB Remote Confirmation: %16phN", smp->pcnf);
+                       SMP_DBG("OOB Remote Random: %16phN", smp->rr);
                }
 
        } else {
@@ -681,9 +743,9 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
        complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags);
        mgmt_smp_complete(hcon, complete);
 
-       kfree(smp->csrk);
-       kfree(smp->slave_csrk);
-       kfree(smp->link_key);
+       kzfree(smp->csrk);
+       kzfree(smp->slave_csrk);
+       kzfree(smp->link_key);
 
        crypto_free_blkcipher(smp->tfm_aes);
        crypto_free_hash(smp->tfm_cmac);
@@ -692,7 +754,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
         * support hasn't been explicitly enabled.
         */
        if (smp->ltk && smp->ltk->type == SMP_LTK_P256_DEBUG &&
-           !test_bit(HCI_KEEP_DEBUG_KEYS, &hcon->hdev->dev_flags)) {
+           !hci_dev_test_flag(hcon->hdev, HCI_KEEP_DEBUG_KEYS)) {
                list_del_rcu(&smp->ltk->list);
                kfree_rcu(smp->ltk, rcu);
                smp->ltk = NULL;
@@ -717,7 +779,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
        }
 
        chan->data = NULL;
-       kfree(smp);
+       kzfree(smp);
        hci_conn_drop(hcon);
 }
 
@@ -818,6 +880,12 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
                return 0;
        }
 
+       /* If this function is used for SC -> legacy fallback we
+        * can only recover the just-works case.
+        */
+       if (test_bit(SMP_FLAG_SC, &smp->flags))
+               return -EINVAL;
+
        /* Not Just Works/Confirm results in MITM Authentication */
        if (smp->method != JUST_CFM) {
                set_bit(SMP_FLAG_MITM_AUTH, &smp->flags);
@@ -1052,7 +1120,7 @@ static void smp_notify_keys(struct l2cap_conn *conn)
                        /* Don't keep debug keys around if the relevant
                         * flag is not set.
                         */
-                       if (!test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags) &&
+                       if (!hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS) &&
                            key->type == HCI_LK_DEBUG_COMBINATION) {
                                list_del_rcu(&key->list);
                                kfree_rcu(key, rcu);
@@ -1097,13 +1165,13 @@ static void sc_generate_link_key(struct smp_chan *smp)
                return;
 
        if (smp_h6(smp->tfm_cmac, smp->tk, tmp1, smp->link_key)) {
-               kfree(smp->link_key);
+               kzfree(smp->link_key);
                smp->link_key = NULL;
                return;
        }
 
        if (smp_h6(smp->tfm_cmac, smp->link_key, lebr, smp->link_key)) {
-               kfree(smp->link_key);
+               kzfree(smp->link_key);
                smp->link_key = NULL;
                return;
        }
@@ -1252,7 +1320,10 @@ static void smp_distribute_keys(struct smp_chan *smp)
 
                csrk = kzalloc(sizeof(*csrk), GFP_KERNEL);
                if (csrk) {
-                       csrk->master = 0x00;
+                       if (hcon->sec_level > BT_SECURITY_MEDIUM)
+                               csrk->type = MGMT_CSRK_LOCAL_AUTHENTICATED;
+                       else
+                               csrk->type = MGMT_CSRK_LOCAL_UNAUTHENTICATED;
                        memcpy(csrk->val, sign.csrk, sizeof(csrk->val));
                }
                smp->slave_csrk = csrk;
@@ -1297,7 +1368,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
        smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(smp->tfm_aes)) {
                BT_ERR("Unable to create ECB crypto context");
-               kfree(smp);
+               kzfree(smp);
                return NULL;
        }
 
@@ -1305,7 +1376,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
        if (IS_ERR(smp->tfm_cmac)) {
                BT_ERR("Unable to create CMAC crypto context");
                crypto_free_blkcipher(smp->tfm_aes);
-               kfree(smp);
+               kzfree(smp);
                return NULL;
        }
 
@@ -1601,15 +1672,15 @@ static void build_bredr_pairing_cmd(struct smp_chan *smp,
        struct hci_dev *hdev = conn->hcon->hdev;
        u8 local_dist = 0, remote_dist = 0;
 
-       if (test_bit(HCI_BONDABLE, &hdev->dev_flags)) {
+       if (hci_dev_test_flag(hdev, HCI_BONDABLE)) {
                local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
                remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
        }
 
-       if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING))
                remote_dist |= SMP_DIST_ID_KEY;
 
-       if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
+       if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                local_dist |= SMP_DIST_ID_KEY;
 
        if (!rsp) {
@@ -1661,22 +1732,29 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
        /* We didn't start the pairing, so match remote */
        auth = req->auth_req & AUTH_REQ_MASK(hdev);
 
-       if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
+       if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
            (auth & SMP_AUTH_BONDING))
                return SMP_PAIRING_NOTSUPP;
 
-       if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
+       if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
                return SMP_AUTH_REQUIREMENTS;
 
        smp->preq[0] = SMP_CMD_PAIRING_REQ;
        memcpy(&smp->preq[1], req, sizeof(*req));
        skb_pull(skb, sizeof(*req));
 
+       /* If the remote side's OOB flag is set it means it has
+        * successfully received our local OOB data - therefore set the
+        * flag to indicate that local OOB is in use.
+        */
+       if (req->oob_flag == SMP_OOB_PRESENT)
+               set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
+
        /* SMP over BR/EDR requires special treatment */
        if (conn->hcon->type == ACL_LINK) {
                /* We must have a BR/EDR SC link */
                if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) &&
-                   !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
+                   !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
                        return SMP_CROSS_TRANSP_NOT_ALLOWED;
 
                set_bit(SMP_FLAG_SC, &smp->flags);
@@ -1734,14 +1812,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
 
        clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
 
+       /* Strictly speaking we shouldn't allow Pairing Confirm for the
+        * SC case, however some implementations incorrectly copy RFU auth
+        * req bits from our security request, which may create a false
+        * positive SC enablement.
+        */
+       SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+
        if (test_bit(SMP_FLAG_SC, &smp->flags)) {
                SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY);
                /* Clear bits which are generated but not distributed */
                smp->remote_key_dist &= ~SMP_SC_NO_DIST;
                /* Wait for Public Key from Initiating Device */
                return 0;
-       } else {
-               SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
        }
 
        /* Request setup of TK */
@@ -1758,7 +1841,26 @@ static u8 sc_send_public_key(struct smp_chan *smp)
 
        BT_DBG("");
 
-       if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags)) {
+       if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) {
+               struct l2cap_chan *chan = hdev->smp_data;
+               struct smp_dev *smp_dev;
+
+               if (!chan || !chan->data)
+                       return SMP_UNSPECIFIED;
+
+               smp_dev = chan->data;
+
+               memcpy(smp->local_pk, smp_dev->local_pk, 64);
+               memcpy(smp->local_sk, smp_dev->local_sk, 32);
+               memcpy(smp->lr, smp_dev->local_rand, 16);
+
+               if (smp_dev->debug_key)
+                       set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
+
+               goto done;
+       }
+
+       if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
                BT_DBG("Using debug keys");
                memcpy(smp->local_pk, debug_pk, 64);
                memcpy(smp->local_sk, debug_sk, 32);
@@ -1777,8 +1879,9 @@ static u8 sc_send_public_key(struct smp_chan *smp)
                }
        }
 
+done:
        SMP_DBG("Local Public Key X: %32phN", smp->local_pk);
-       SMP_DBG("Local Public Key Y: %32phN", &smp->local_pk[32]);
+       SMP_DBG("Local Public Key Y: %32phN", smp->local_pk + 32);
        SMP_DBG("Local Private Key:  %32phN", smp->local_sk);
 
        smp_send_cmd(smp->conn, SMP_CMD_PUBLIC_KEY, 64, smp->local_pk);
@@ -1813,9 +1916,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
 
        auth = rsp->auth_req & AUTH_REQ_MASK(hdev);
 
-       if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
+       if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
                return SMP_AUTH_REQUIREMENTS;
 
+       /* If the remote side's OOB flag is set it means it has
+        * successfully received our local OOB data - therefore set the
+        * flag to indicate that local OOB is in use.
+        */
+       if (rsp->oob_flag == SMP_OOB_PRESENT)
+               set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
+
        smp->prsp[0] = SMP_CMD_PAIRING_RSP;
        memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
 
@@ -1882,10 +1992,6 @@ static u8 sc_check_confirm(struct smp_chan *smp)
 
        BT_DBG("");
 
-       /* Public Key exchange must happen before any other steps */
-       if (!test_bit(SMP_FLAG_REMOTE_PK, &smp->flags))
-               return SMP_UNSPECIFIED;
-
        if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
                return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM);
 
@@ -1898,6 +2004,47 @@ static u8 sc_check_confirm(struct smp_chan *smp)
        return 0;
 }
 
+/* Work-around for some implementations that incorrectly copy RFU bits
+ * from our security request and thereby create the impression that
+ * we're doing SC when in fact the remote doesn't support it.
+ */
+static int fixup_sc_false_positive(struct smp_chan *smp)
+{
+       struct l2cap_conn *conn = smp->conn;
+       struct hci_conn *hcon = conn->hcon;
+       struct hci_dev *hdev = hcon->hdev;
+       struct smp_cmd_pairing *req, *rsp;
+       u8 auth;
+
+       /* The issue is only observed when we're in slave role */
+       if (hcon->out)
+               return SMP_UNSPECIFIED;
+
+       if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
+               BT_ERR("Refusing SMP SC -> legacy fallback in SC-only mode");
+               return SMP_UNSPECIFIED;
+       }
+
+       BT_ERR("Trying to fall back to legacy SMP");
+
+       req = (void *) &smp->preq[1];
+       rsp = (void *) &smp->prsp[1];
+
+       /* Rebuild key dist flags which may have been cleared for SC */
+       smp->remote_key_dist = (req->init_key_dist & rsp->resp_key_dist);
+
+       auth = req->auth_req & AUTH_REQ_MASK(hdev);
+
+       if (tk_request(conn, 0, auth, rsp->io_capability, req->io_capability)) {
+               BT_ERR("Failed to fall back to legacy SMP");
+               return SMP_UNSPECIFIED;
+       }
+
+       clear_bit(SMP_FLAG_SC, &smp->flags);
+
+       return 0;
+}
+
 static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
 {
        struct l2cap_chan *chan = conn->smp;
@@ -1911,8 +2058,19 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
        memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
        skb_pull(skb, sizeof(smp->pcnf));
 
-       if (test_bit(SMP_FLAG_SC, &smp->flags))
-               return sc_check_confirm(smp);
+       if (test_bit(SMP_FLAG_SC, &smp->flags)) {
+               int ret;
+
+               /* Public Key exchange must happen before any other steps */
+               if (test_bit(SMP_FLAG_REMOTE_PK, &smp->flags))
+                       return sc_check_confirm(smp);
+
+               BT_ERR("Unexpected SMP Pairing Confirm");
+
+               ret = fixup_sc_false_positive(smp);
+               if (ret)
+                       return ret;
+       }
 
        if (conn->hcon->out) {
                smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
@@ -1923,8 +2081,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
 
        if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
                return smp_confirm(smp);
-       else
-               set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
+
+       set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
 
        return 0;
 }
@@ -2083,7 +2241,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 
        auth = rp->auth_req & AUTH_REQ_MASK(hdev);
 
-       if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
+       if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
                return SMP_AUTH_REQUIREMENTS;
 
        if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
@@ -2104,7 +2262,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
        if (!smp)
                return SMP_UNSPECIFIED;
 
-       if (!test_bit(HCI_BONDABLE, &hcon->hdev->dev_flags) &&
+       if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
            (auth & SMP_AUTH_BONDING))
                return SMP_PAIRING_NOTSUPP;
 
@@ -2138,7 +2296,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 
        chan = conn->smp;
 
-       if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
+       if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
                return 1;
 
        if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
@@ -2167,7 +2325,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 
        authreq = seclevel_to_authreq(sec_level);
 
-       if (test_bit(HCI_SC_ENABLED, &hcon->hdev->dev_flags))
+       if (hci_dev_test_flag(hcon->hdev, HCI_SC_ENABLED))
                authreq |= SMP_AUTH_SC;
 
        /* Require MITM if IO Capability allows or the security level
@@ -2352,7 +2510,10 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
 
        csrk = kzalloc(sizeof(*csrk), GFP_KERNEL);
        if (csrk) {
-               csrk->master = 0x01;
+               if (conn->hcon->sec_level > BT_SECURITY_MEDIUM)
+                       csrk->type = MGMT_CSRK_REMOTE_AUTHENTICATED;
+               else
+                       csrk->type = MGMT_CSRK_REMOTE_UNAUTHENTICATED;
                memcpy(csrk->val, rp->csrk, sizeof(csrk->val));
        }
        smp->csrk = csrk;
@@ -2368,7 +2529,8 @@ static u8 sc_select_method(struct smp_chan *smp)
        struct smp_cmd_pairing *local, *remote;
        u8 local_mitm, remote_mitm, local_io, remote_io, method;
 
-       if (test_bit(SMP_FLAG_OOB, &smp->flags))
+       if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags) ||
+           test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags))
                return REQ_OOB;
 
        /* The preq/prsp contain the raw Pairing Request/Response PDUs
@@ -2422,6 +2584,16 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
 
        memcpy(smp->remote_pk, key, 64);
 
+       if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) {
+               err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk,
+                            smp->rr, 0, cfm.confirm_val);
+               if (err)
+                       return SMP_UNSPECIFIED;
+
+               if (memcmp(cfm.confirm_val, smp->pcnf, 16))
+                       return SMP_CONFIRM_FAILED;
+       }
+
        /* Non-initiating device sends its public key after receiving
         * the key from the initiating device.
         */
@@ -2432,7 +2604,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
        }
 
        SMP_DBG("Remote Public Key X: %32phN", smp->remote_pk);
-       SMP_DBG("Remote Public Key Y: %32phN", &smp->remote_pk[32]);
+       SMP_DBG("Remote Public Key Y: %32phN", smp->remote_pk + 32);
 
        if (!ecdh_shared_secret(smp->remote_pk, smp->local_sk, smp->dhkey))
                return SMP_UNSPECIFIED;
@@ -2470,14 +2642,6 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
        }
 
        if (smp->method == REQ_OOB) {
-               err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk,
-                            smp->rr, 0, cfm.confirm_val);
-               if (err)
-                       return SMP_UNSPECIFIED;
-
-               if (memcmp(cfm.confirm_val, smp->pcnf, 16))
-                       return SMP_CONFIRM_FAILED;
-
                if (hcon->out)
                        smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
                                     sizeof(smp->prnd), smp->prnd);
@@ -2550,6 +2714,8 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
 
        if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
                put_unaligned_le32(hcon->passkey_notify, r);
+       else if (smp->method == REQ_OOB)
+               memcpy(r, smp->lr, 16);
 
        err = smp_f6(smp->tfm_cmac, smp->mackey, smp->rrnd, smp->prnd, r,
                     io_cap, remote_addr, local_addr, e);
@@ -2600,7 +2766,7 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
        if (skb->len < 1)
                return -EILSEQ;
 
-       if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {
+       if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) {
                reason = SMP_PAIRING_NOTSUPP;
                goto done;
        }
@@ -2738,16 +2904,16 @@ static void bredr_pairing(struct l2cap_chan *chan)
                return;
 
        /* Secure Connections support must be enabled */
-       if (!test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_SC_ENABLED))
                return;
 
        /* BR/EDR must use Secure Connections for SMP */
        if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) &&
-           !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
+           !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
                return;
 
        /* If our LE support is not enabled don't do anything */
-       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;
 
        /* Don't bother if remote LE support is not enabled */
@@ -2851,7 +3017,7 @@ static struct sk_buff *smp_alloc_skb_cb(struct l2cap_chan *chan,
                return ERR_PTR(-ENOMEM);
 
        skb->priority = HCI_PRIO_MAX;
-       bt_cb(skb)->chan = chan;
+       bt_cb(skb)->l2cap.chan = chan;
 
        return skb;
 }
@@ -2924,51 +3090,63 @@ static const struct l2cap_ops smp_root_chan_ops = {
 static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
 {
        struct l2cap_chan *chan;
-       struct crypto_blkcipher *tfm_aes;
+       struct smp_dev *smp;
+       struct crypto_blkcipher *tfm_aes;
+       struct crypto_hash *tfm_cmac;
 
        if (cid == L2CAP_CID_SMP_BREDR) {
-               tfm_aes = NULL;
+               smp = NULL;
                goto create_chan;
        }
 
-       tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, 0);
+       smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+       if (!smp)
+               return ERR_PTR(-ENOMEM);
+
+       tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_aes)) {
-               BT_ERR("Unable to create crypto context");
+               BT_ERR("Unable to create ECB crypto context");
+               kzfree(smp);
                return ERR_CAST(tfm_aes);
        }
 
+       tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm_cmac)) {
+               BT_ERR("Unable to create CMAC crypto context");
+               crypto_free_blkcipher(tfm_aes);
+               kzfree(smp);
+               return ERR_CAST(tfm_cmac);
+       }
+
+       smp->tfm_aes = tfm_aes;
+       smp->tfm_cmac = tfm_cmac;
+
 create_chan:
        chan = l2cap_chan_create();
        if (!chan) {
-               crypto_free_blkcipher(tfm_aes);
+               if (smp) {
+                       crypto_free_blkcipher(smp->tfm_aes);
+                       crypto_free_hash(smp->tfm_cmac);
+                       kzfree(smp);
+               }
                return ERR_PTR(-ENOMEM);
        }
 
-       chan->data = tfm_aes;
+       chan->data = smp;
 
        l2cap_add_scid(chan, cid);
 
        l2cap_chan_set_defaults(chan);
 
        if (cid == L2CAP_CID_SMP) {
-               /* If usage of static address is forced or if the devices
-                * does not have a public address, then listen on the static
-                * address.
-                *
-                * In case BR/EDR has been disabled on a dual-mode controller
-                * and a static address has been configued, then listen on
-                * the static address instead.
-                */
-               if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
-                   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
-                   (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
-                    bacmp(&hdev->static_addr, BDADDR_ANY))) {
-                       bacpy(&chan->src, &hdev->static_addr);
-                       chan->src_type = BDADDR_LE_RANDOM;
-               } else {
-                       bacpy(&chan->src, &hdev->bdaddr);
+               u8 bdaddr_type;
+
+               hci_copy_identity_address(hdev, &chan->src, &bdaddr_type);
+
+               if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
                        chan->src_type = BDADDR_LE_PUBLIC;
-               }
+               else
+                       chan->src_type = BDADDR_LE_RANDOM;
        } else {
                bacpy(&chan->src, &hdev->bdaddr);
                chan->src_type = BDADDR_BREDR;
@@ -2987,14 +3165,18 @@ create_chan:
 
 static void smp_del_chan(struct l2cap_chan *chan)
 {
-       struct crypto_blkcipher *tfm_aes;
+       struct smp_dev *smp;
 
        BT_DBG("chan %p", chan);
 
-       tfm_aes = chan->data;
-       if (tfm_aes) {
+       smp = chan->data;
+       if (smp) {
                chan->data = NULL;
-               crypto_free_blkcipher(tfm_aes);
+               if (smp->tfm_aes)
+                       crypto_free_blkcipher(smp->tfm_aes);
+               if (smp->tfm_cmac)
+                       crypto_free_hash(smp->tfm_cmac);
+               kzfree(smp);
        }
 
        l2cap_chan_put(chan);
@@ -3007,7 +3189,7 @@ static ssize_t force_bredr_smp_read(struct file *file,
        struct hci_dev *hdev = file->private_data;
        char buf[3];
 
-       buf[0] = test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags) ? 'Y': 'N';
+       buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -3029,7 +3211,7 @@ static ssize_t force_bredr_smp_write(struct file *file,
        if (strtobool(buf, &enable))
                return -EINVAL;
 
-       if (enable == test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
+       if (enable == hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
                return -EALREADY;
 
        if (enable) {
@@ -3048,7 +3230,7 @@ static ssize_t force_bredr_smp_write(struct file *file,
                smp_del_chan(chan);
        }
 
-       change_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags);
+       hci_dev_change_flag(hdev, HCI_FORCE_BREDR_SMP);
 
        return count;
 }
@@ -3367,6 +3549,21 @@ static int __init test_h6(struct crypto_hash *tfm_cmac)
        return 0;
 }
 
+static char test_smp_buffer[32];
+
+static ssize_t test_smp_read(struct file *file, char __user *user_buf,
+                            size_t count, loff_t *ppos)
+{
+       return simple_read_from_buffer(user_buf, count, ppos, test_smp_buffer,
+                                      strlen(test_smp_buffer));
+}
+
+static const struct file_operations test_smp_fops = {
+       .open           = simple_open,
+       .read           = test_smp_read,
+       .llseek         = default_llseek,
+};
+
 static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
                                struct crypto_hash *tfm_cmac)
 {
@@ -3379,49 +3576,49 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
        err = test_ah(tfm_aes);
        if (err) {
                BT_ERR("smp_ah test failed");
-               return err;
+               goto done;
        }
 
        err = test_c1(tfm_aes);
        if (err) {
                BT_ERR("smp_c1 test failed");
-               return err;
+               goto done;
        }
 
        err = test_s1(tfm_aes);
        if (err) {
                BT_ERR("smp_s1 test failed");
-               return err;
+               goto done;
        }
 
        err = test_f4(tfm_cmac);
        if (err) {
                BT_ERR("smp_f4 test failed");
-               return err;
+               goto done;
        }
 
        err = test_f5(tfm_cmac);
        if (err) {
                BT_ERR("smp_f5 test failed");
-               return err;
+               goto done;
        }
 
        err = test_f6(tfm_cmac);
        if (err) {
                BT_ERR("smp_f6 test failed");
-               return err;
+               goto done;
        }
 
        err = test_g2(tfm_cmac);
        if (err) {
                BT_ERR("smp_g2 test failed");
-               return err;
+               goto done;
        }
 
        err = test_h6(tfm_cmac);
        if (err) {
                BT_ERR("smp_h6 test failed");
-               return err;
+               goto done;
        }
 
        rettime = ktime_get();
@@ -3430,7 +3627,17 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
 
        BT_INFO("SMP test passed in %llu usecs", duration);
 
-       return 0;
+done:
+       if (!err)
+               snprintf(test_smp_buffer, sizeof(test_smp_buffer),
+                        "PASS (%llu usecs)\n", duration);
+       else
+               snprintf(test_smp_buffer, sizeof(test_smp_buffer), "FAIL\n");
+
+       debugfs_create_file("selftest_smp", 0444, bt_debugfs, NULL,
+                           &test_smp_fops);
+
+       return err;
 }
 
 int __init bt_selftest_smp(void)
index 60c5b73fcb4b26e3aab32604b58809ca30a4e6f4..6cf872563ea71d425dd1798314765edb56d39a51 100644 (file)
@@ -188,6 +188,7 @@ int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
 bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
                     const bdaddr_t *bdaddr);
 int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa);
+int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]);
 
 int smp_register(struct hci_dev *hdev);
 void smp_unregister(struct hci_dev *hdev);
index ffd379db5938b17694be31e85d104289c7fb09bc..4ff77a16956c2740cdcae2c1a1e5a38209a7a6c9 100644 (file)
@@ -25,6 +25,9 @@
 #define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
                         NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
 
+const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
+EXPORT_SYMBOL_GPL(nf_br_ops);
+
 /* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -33,16 +36,15 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        struct net_bridge_fdb_entry *dst;
        struct net_bridge_mdb_entry *mdst;
        struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
+       const struct nf_br_ops *nf_ops;
        u16 vid = 0;
 
        rcu_read_lock();
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-       if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
-               br_nf_pre_routing_finish_bridge_slow(skb);
+       nf_ops = rcu_dereference(nf_br_ops);
+       if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
                rcu_read_unlock();
                return NETDEV_TX_OK;
        }
-#endif
 
        u64_stats_update_begin(&brstats->syncp);
        brstats->tx_packets++;
index f96933a823e327fe56f64d1005101309e78a8e49..3304a544233174a3d1c7474cb19fffc05483be78 100644 (file)
@@ -37,9 +37,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
 
 int br_dev_queue_push_xmit(struct sk_buff *skb)
 {
-       /* ip_fragment doesn't copy the MAC header */
-       if (nf_bridge_maybe_copy_header(skb) ||
-           !is_skb_forwardable(skb->dev, skb)) {
+       if (!is_skb_forwardable(skb->dev, skb)) {
                kfree_skb(skb);
        } else {
                skb_push(skb, ETH_HLEN);
@@ -188,6 +186,9 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
                /* Do not flood to ports that enable proxy ARP */
                if (p->flags & BR_PROXYARP)
                        continue;
+               if ((p->flags & BR_PROXYARP_WIFI) &&
+                   BR_INPUT_SKB_CB(skb)->proxyarp_replied)
+                       continue;
 
                prev = maybe_deliver(prev, p, skb, __packet_hook);
                if (IS_ERR(prev))
index e2aa7be3a847f448a404e0a43f6d1a09f1a0517a..052c5ebbc9472c833df81e28a4895b96ba3f389c 100644 (file)
@@ -60,7 +60,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
 }
 
 static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
-                           u16 vid)
+                           u16 vid, struct net_bridge_port *p)
 {
        struct net_device *dev = br->dev;
        struct neighbour *n;
@@ -68,6 +68,8 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
        u8 *arpptr, *sha;
        __be32 sip, tip;
 
+       BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
+
        if (dev->flags & IFF_NOARP)
                return;
 
@@ -105,9 +107,12 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
                }
 
                f = __br_fdb_get(br, n->ha, vid);
-               if (f)
+               if (f && ((p->flags & BR_PROXYARP) ||
+                         (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)))) {
                        arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip,
                                 sha, n->ha, sha);
+                       BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
+               }
 
                neigh_release(n);
        }
@@ -153,12 +158,10 @@ int br_handle_frame_finish(struct sk_buff *skb)
 
        dst = NULL;
 
-       if (is_broadcast_ether_addr(dest)) {
-               if (IS_ENABLED(CONFIG_INET) &&
-                   p->flags & BR_PROXYARP &&
-                   skb->protocol == htons(ETH_P_ARP))
-                       br_do_proxy_arp(skb, br, vid);
+       if (IS_ENABLED(CONFIG_INET) && skb->protocol == htons(ETH_P_ARP))
+               br_do_proxy_arp(skb, br, vid, p);
 
+       if (is_broadcast_ether_addr(dest)) {
                skb2 = skb;
                unicast = false;
        } else if (is_multicast_ether_addr(dest)) {
index 0ee453fad3de652142ddce3b6af6f1c3bb52e95a..7527e94dd5dce327064fe045506458d1a42f3c0c 100644 (file)
 #include <net/route.h>
 #include <net/netfilter/br_netfilter.h>
 
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
 #include <asm/uaccess.h>
 #include "br_private.h"
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
 
-#define skb_origaddr(skb)       (((struct bridge_skb_cb *) \
-                                (skb->nf_bridge->data))->daddr.ipv4)
-#define store_orig_dstaddr(skb)         (skb_origaddr(skb) = ip_hdr(skb)->daddr)
-#define dnat_took_place(skb)    (skb_origaddr(skb) != ip_hdr(skb)->daddr)
-
 #ifdef CONFIG_SYSCTL
 static struct ctl_table_header *brnf_sysctl_header;
 static int brnf_call_iptables __read_mostly = 1;
@@ -154,6 +153,18 @@ static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
        return nf_bridge;
 }
 
+static unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
+{
+       switch (skb->protocol) {
+       case __cpu_to_be16(ETH_P_8021Q):
+               return VLAN_HLEN;
+       case __cpu_to_be16(ETH_P_PPP_SES):
+               return PPPOE_SES_HLEN;
+       default:
+               return 0;
+       }
+}
+
 static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
 {
        unsigned int len = nf_bridge_encap_header_len(skb);
@@ -239,6 +250,14 @@ drop:
        return -1;
 }
 
+static void nf_bridge_update_protocol(struct sk_buff *skb)
+{
+       if (skb->nf_bridge->mask & BRNF_8021Q)
+               skb->protocol = htons(ETH_P_8021Q);
+       else if (skb->nf_bridge->mask & BRNF_PPPoE)
+               skb->protocol = htons(ETH_P_PPP_SES);
+}
+
 /* PF_BRIDGE/PRE_ROUTING *********************************************/
 /* Undo the changes made for ip6tables PREROUTING and continue the
  * bridge PRE_ROUTING hook. */
@@ -314,6 +333,22 @@ free_skb:
        return 0;
 }
 
+static bool dnat_took_place(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (!ct || nf_ct_is_untracked(ct))
+               return false;
+
+       return test_bit(IPS_DST_NAT_BIT, &ct->status);
+#else
+       return false;
+#endif
+}
+
 /* This requires some explaining. If DNAT has taken place,
  * we will need to fix up the destination Ethernet address.
  *
@@ -527,9 +562,7 @@ bad:
  * to ip6tables, which doesn't support NAT, so things are fairly simple. */
 static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
-                                          const struct net_device *in,
-                                          const struct net_device *out,
-                                          int (*okfn)(struct sk_buff *))
+                                          const struct nf_hook_state *state)
 {
        const struct ipv6hdr *hdr;
        u32 pkt_len;
@@ -577,9 +610,7 @@ static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
  * address to be able to detect DNAT afterwards. */
 static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
        struct net_bridge_port *p;
        struct net_bridge *br;
@@ -588,7 +619,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
        if (unlikely(!pskb_may_pull(skb, len)))
                return NF_DROP;
 
-       p = br_port_get_rcu(in);
+       p = br_port_get_rcu(state->in);
        if (p == NULL)
                return NF_DROP;
        br = p->br;
@@ -598,7 +629,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
                        return NF_ACCEPT;
 
                nf_bridge_pull_encap_header_rcsum(skb);
-               return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn);
+               return br_nf_pre_routing_ipv6(ops, skb, state);
        }
 
        if (!brnf_call_iptables && !br->nf_call_iptables)
@@ -617,7 +648,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
                return NF_DROP;
        if (!setup_pre_routing(skb))
                return NF_DROP;
-       store_orig_dstaddr(skb);
+
        skb->protocol = htons(ETH_P_IP);
 
        NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
@@ -636,9 +667,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
  * prevent this from happening. */
 static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
                                   struct sk_buff *skb,
-                                  const struct net_device *in,
-                                  const struct net_device *out,
-                                  int (*okfn)(struct sk_buff *))
+                                  const struct nf_hook_state *state)
 {
        br_drop_fake_rtable(skb);
        return NF_ACCEPT;
@@ -675,9 +704,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
  * bridge ports. */
 static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
-                                    const struct net_device *in,
-                                    const struct net_device *out,
-                                    int (*okfn)(struct sk_buff *))
+                                    const struct nf_hook_state *state)
 {
        struct nf_bridge_info *nf_bridge;
        struct net_device *parent;
@@ -691,7 +718,7 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
        if (!nf_bridge_unshare(skb))
                return NF_DROP;
 
-       parent = bridge_parent(out);
+       parent = bridge_parent(state->out);
        if (!parent)
                return NF_DROP;
 
@@ -713,31 +740,27 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
        if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
                return NF_DROP;
 
-       /* The physdev module checks on this */
-       nf_bridge->mask |= BRNF_BRIDGED;
        nf_bridge->physoutdev = skb->dev;
        if (pf == NFPROTO_IPV4)
                skb->protocol = htons(ETH_P_IP);
        else
                skb->protocol = htons(ETH_P_IPV6);
 
-       NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent,
-               br_nf_forward_finish);
+       NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, state->in),
+               parent, br_nf_forward_finish);
 
        return NF_STOLEN;
 }
 
 static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
        struct net_bridge_port *p;
        struct net_bridge *br;
        struct net_device **d = (struct net_device **)(skb->cb);
 
-       p = br_port_get_rcu(out);
+       p = br_port_get_rcu(state->out);
        if (p == NULL)
                return NF_ACCEPT;
        br = p->br;
@@ -756,31 +779,61 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
                        nf_bridge_push_encap_header(skb);
                return NF_ACCEPT;
        }
-       *d = (struct net_device *)in;
-       NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
-               (struct net_device *)out, br_nf_forward_finish);
+       *d = state->in;
+       NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, state->in,
+               state->out, br_nf_forward_finish);
 
        return NF_STOLEN;
 }
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+static bool nf_bridge_copy_header(struct sk_buff *skb)
+{
+       int err;
+       unsigned int header_size;
+
+       nf_bridge_update_protocol(skb);
+       header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
+       err = skb_cow_head(skb, header_size);
+       if (err)
+               return false;
+
+       skb_copy_to_linear_data_offset(skb, -header_size,
+                                      skb->nf_bridge->data, header_size);
+       __skb_push(skb, nf_bridge_encap_header_len(skb));
+       return true;
+}
+
+static int br_nf_push_frag_xmit(struct sk_buff *skb)
+{
+       if (!nf_bridge_copy_header(skb)) {
+               kfree_skb(skb);
+               return 0;
+       }
+
+       return br_dev_queue_push_xmit(skb);
+}
+
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
        int ret;
        int frag_max_size;
+       unsigned int mtu_reserved;
+
+       if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP))
+               return br_dev_queue_push_xmit(skb);
 
+       mtu_reserved = nf_bridge_mtu_reduction(skb);
        /* This is wrong! We should preserve the original fragment
         * boundaries by preserving frag_list rather than refragmenting.
         */
-       if (skb->protocol == htons(ETH_P_IP) &&
-           skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
-           !skb_is_gso(skb)) {
+       if (skb->len + mtu_reserved > skb->dev->mtu) {
                frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
                if (br_parse_ip_options(skb))
                        /* Drop invalid packet */
                        return NF_DROP;
                IPCB(skb)->frag_max_size = frag_max_size;
-               ret = ip_fragment(skb, br_dev_queue_push_xmit);
+               ret = ip_fragment(skb, br_nf_push_frag_xmit);
        } else
                ret = br_dev_queue_push_xmit(skb);
 
@@ -796,15 +849,18 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 /* PF_BRIDGE/POST_ROUTING ********************************************/
 static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
-                                      const struct net_device *in,
-                                      const struct net_device *out,
-                                      int (*okfn)(struct sk_buff *))
+                                      const struct nf_hook_state *state)
 {
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct net_device *realoutdev = bridge_parent(skb->dev);
        u_int8_t pf;
 
-       if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
+       /* if nf_bridge is set, but ->physoutdev is NULL, this packet came in
+        * on a bridge, but was delivered locally and is now being routed:
+        *
+        * POST_ROUTING was already invoked from the ip stack.
+        */
+       if (!nf_bridge || !nf_bridge->physoutdev)
                return NF_ACCEPT;
 
        if (!realoutdev)
@@ -842,9 +898,7 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
  * for the second time. */
 static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
                                   struct sk_buff *skb,
-                                  const struct net_device *in,
-                                  const struct net_device *out,
-                                  int (*okfn)(struct sk_buff *))
+                                  const struct nf_hook_state *state)
 {
        if (skb->nf_bridge &&
            !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
@@ -854,6 +908,41 @@ static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
        return NF_ACCEPT;
 }
 
+/* This is called when br_netfilter has called into iptables/netfilter,
+ * and DNAT has taken place on a bridge-forwarded packet.
+ *
+ * neigh->output has created a new MAC header, with local br0 MAC
+ * as saddr.
+ *
+ * This restores the original MAC saddr of the bridged packet
+ * before invoking bridge forward logic to transmit the packet.
+ */
+static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
+{
+       struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+
+       skb_pull(skb, ETH_HLEN);
+       nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;
+
+       skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
+                                      skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
+       skb->dev = nf_bridge->physindev;
+       br_handle_frame_finish(skb);
+}
+
+static int br_nf_dev_xmit(struct sk_buff *skb)
+{
+       if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
+               br_nf_pre_routing_finish_bridge_slow(skb);
+               return 1;
+       }
+       return 0;
+}
+
+static const struct nf_br_ops br_ops = {
+       .br_dev_xmit_hook =     br_nf_dev_xmit,
+};
+
 void br_netfilter_enable(void)
 {
 }
@@ -991,12 +1080,14 @@ static int __init br_netfilter_init(void)
                return -ENOMEM;
        }
 #endif
+       RCU_INIT_POINTER(nf_br_ops, &br_ops);
        printk(KERN_NOTICE "Bridge firewalling registered\n");
        return 0;
 }
 
 static void __exit br_netfilter_fini(void)
 {
+       RCU_INIT_POINTER(nf_br_ops, NULL);
        nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
 #ifdef CONFIG_SYSCTL
        unregister_net_sysctl_table(brnf_sysctl_header);
index 4fbcea0e7ecb41cbad57e3320d1f46eeb752f244..0e4ddb81610d90ff51a45835424cef547bba73bf 100644 (file)
 #include "br_private.h"
 #include "br_private_stp.h"
 
+static int br_get_num_vlan_infos(const struct net_port_vlans *pv,
+                                u32 filter_mask)
+{
+       u16 vid_range_start = 0, vid_range_end = 0;
+       u16 vid_range_flags = 0;
+       u16 pvid, vid, flags;
+       int num_vlans = 0;
+
+       if (filter_mask & RTEXT_FILTER_BRVLAN)
+               return pv->num_vlans;
+
+       if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
+               return 0;
+
+       /* Count number of vlan info's
+        */
+       pvid = br_get_pvid(pv);
+       for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+               flags = 0;
+               if (vid == pvid)
+                       flags |= BRIDGE_VLAN_INFO_PVID;
+
+               if (test_bit(vid, pv->untagged_bitmap))
+                       flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+               if (vid_range_start == 0) {
+                       goto initvars;
+               } else if ((vid - vid_range_end) == 1 &&
+                       flags == vid_range_flags) {
+                       vid_range_end = vid;
+                       continue;
+               } else {
+                       if ((vid_range_end - vid_range_start) > 0)
+                               num_vlans += 2;
+                       else
+                               num_vlans += 1;
+               }
+initvars:
+               vid_range_start = vid;
+               vid_range_end = vid;
+               vid_range_flags = flags;
+       }
+
+       if (vid_range_start != 0) {
+               if ((vid_range_end - vid_range_start) > 0)
+                       num_vlans += 2;
+               else
+                       num_vlans += 1;
+       }
+
+       return num_vlans;
+}
+
+static size_t br_get_link_af_size_filtered(const struct net_device *dev,
+                                          u32 filter_mask)
+{
+       struct net_port_vlans *pv;
+       int num_vlan_infos;
+
+       rcu_read_lock();
+       if (br_port_exists(dev))
+               pv = nbp_get_vlan_info(br_port_get_rcu(dev));
+       else if (dev->priv_flags & IFF_EBRIDGE)
+               pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
+       else
+               pv = NULL;
+       if (pv)
+               num_vlan_infos = br_get_num_vlan_infos(pv, filter_mask);
+       else
+               num_vlan_infos = 0;
+       rcu_read_unlock();
+
+       if (!num_vlan_infos)
+               return 0;
+
+       /* Each VLAN is returned in bridge_vlan_info along with flags */
+       return num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
+}
+
 static inline size_t br_port_info_size(void)
 {
        return nla_total_size(1)        /* IFLA_BRPORT_STATE  */
@@ -36,7 +115,7 @@ static inline size_t br_port_info_size(void)
                + 0;
 }
 
-static inline size_t br_nlmsg_size(void)
+static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
 {
        return NLMSG_ALIGN(sizeof(struct ifinfomsg))
                + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
@@ -45,7 +124,9 @@ static inline size_t br_nlmsg_size(void)
                + nla_total_size(4) /* IFLA_MTU */
                + nla_total_size(4) /* IFLA_LINK */
                + nla_total_size(1) /* IFLA_OPERSTATE */
-               + nla_total_size(br_port_info_size()); /* IFLA_PROTINFO */
+               + nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
+               + nla_total_size(br_get_link_af_size_filtered(dev,
+                                filter_mask)); /* IFLA_AF_SPEC */
 }
 
 static int br_port_fill_attrs(struct sk_buff *skb,
@@ -62,7 +143,9 @@ static int br_port_fill_attrs(struct sk_buff *skb,
            nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
            nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
            nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
-           nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)))
+           nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
+           nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
+                      !!(p->flags & BR_PROXYARP_WIFI)))
                return -EMSGSIZE;
 
        return 0;
@@ -222,8 +305,8 @@ static int br_fill_ifinfo(struct sk_buff *skb,
            nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
            (dev->addr_len &&
             nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
-           (dev->ifindex != dev->iflink &&
-            nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+           (dev->ifindex != dev_get_iflink(dev) &&
+            nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
                goto nla_put_failure;
 
        if (event == RTM_NEWLINK && port) {
@@ -280,6 +363,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
        struct net *net;
        struct sk_buff *skb;
        int err = -ENOBUFS;
+       u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
 
        if (!port)
                return;
@@ -288,11 +372,11 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
        br_debug(port->br, "port %u(%s) event %d\n",
                 (unsigned int)port->port_no, port->dev->name, event);
 
-       skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC);
+       skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC);
        if (skb == NULL)
                goto errout;
 
-       err = br_fill_ifinfo(skb, port, 0, 0, event, 0, 0, port->dev);
+       err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in br_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -471,6 +555,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
        br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
        br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
        br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
+       br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
 
        if (tb[IFLA_BRPORT_COST]) {
                err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
@@ -648,6 +733,9 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
        [IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
        [IFLA_BR_HELLO_TIME]    = { .type = NLA_U32 },
        [IFLA_BR_MAX_AGE]       = { .type = NLA_U32 },
+       [IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
+       [IFLA_BR_STP_STATE] = { .type = NLA_U32 },
+       [IFLA_BR_PRIORITY] = { .type = NLA_U16 },
 };
 
 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -677,6 +765,24 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
                        return err;
        }
 
+       if (data[IFLA_BR_AGEING_TIME]) {
+               u32 ageing_time = nla_get_u32(data[IFLA_BR_AGEING_TIME]);
+
+               br->ageing_time = clock_t_to_jiffies(ageing_time);
+       }
+
+       if (data[IFLA_BR_STP_STATE]) {
+               u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);
+
+               br_stp_set_enabled(br, stp_enabled);
+       }
+
+       if (data[IFLA_BR_PRIORITY]) {
+               u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);
+
+               br_stp_set_bridge_priority(br, priority);
+       }
+
        return 0;
 }
 
@@ -685,6 +791,9 @@ static size_t br_get_size(const struct net_device *brdev)
        return nla_total_size(sizeof(u32)) +    /* IFLA_BR_FORWARD_DELAY  */
               nla_total_size(sizeof(u32)) +    /* IFLA_BR_HELLO_TIME */
               nla_total_size(sizeof(u32)) +    /* IFLA_BR_MAX_AGE */
+              nla_total_size(sizeof(u32)) +    /* IFLA_BR_AGEING_TIME */
+              nla_total_size(sizeof(u32)) +    /* IFLA_BR_STP_STATE */
+              nla_total_size(sizeof(u16)) +    /* IFLA_BR_PRIORITY */
               0;
 }
 
@@ -694,10 +803,16 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
        u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
        u32 hello_time = jiffies_to_clock_t(br->hello_time);
        u32 age_time = jiffies_to_clock_t(br->max_age);
+       u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
+       u32 stp_enabled = br->stp_enabled;
+       u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
 
        if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
            nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
-           nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time))
+           nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
+           nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
+           nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
+           nla_put_u16(skb, IFLA_BR_PRIORITY, priority))
                return -EMSGSIZE;
 
        return 0;
index 387cb3bd017c0102b73ea2cf7089500ea00a19a9..20cbb727df4d005db840e9940b85177ecb61eec3 100644 (file)
@@ -54,7 +54,6 @@ static unsigned int fake_mtu(const struct dst_entry *dst)
 
 static struct dst_ops fake_dst_ops = {
        .family         = AF_INET,
-       .protocol       = cpu_to_be16(ETH_P_IP),
        .update_pmtu    = fake_update_pmtu,
        .redirect       = fake_redirect,
        .cow_metrics    = fake_cow_metrics,
index de0919975a25318093cfa640231239f58c113f61..b46fa0c5b8ece865017e23b29e18047f239edbf5 100644 (file)
@@ -305,6 +305,7 @@ struct br_input_skb_cb {
 #endif
 
        u16 frag_max_size;
+       bool proxyarp_replied;
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
        bool vlan_filtered;
@@ -762,6 +763,11 @@ static inline int br_vlan_enabled(struct net_bridge *br)
 }
 #endif
 
+struct nf_br_ops {
+       int (*br_dev_xmit_hook)(struct sk_buff *skb);
+};
+extern const struct nf_br_ops __rcu *nf_br_ops;
+
 /* br_netfilter.c */
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 int br_nf_core_init(void);
index 2de5d91199e8172f9356b104bbcfa772ff460d45..4905845a94e92f125accc4d1a4cf16689fcd3990 100644 (file)
@@ -171,6 +171,7 @@ BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
 BRPORT_ATTR_FLAG(learning, BR_LEARNING);
 BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
 BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP);
+BRPORT_ATTR_FLAG(proxyarp_wifi, BR_PROXYARP_WIFI);
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -215,6 +216,7 @@ static const struct brport_attribute *brport_attrs[] = {
        &brport_attr_multicast_fast_leave,
 #endif
        &brport_attr_proxyarp,
+       &brport_attr_proxyarp_wifi,
        NULL
 };
 
index ce205aabf9c5333e2ea5286ef93a4d5a1d977cd9..8a3f63b2e8073d8081df5fbaac3bf63c348c0447 100644 (file)
@@ -58,20 +58,18 @@ static const struct ebt_table frame_filter = {
 
 static unsigned int
 ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-           const struct net_device *in, const struct net_device *out,
-           int (*okfn)(struct sk_buff *))
+           const struct nf_hook_state *state)
 {
-       return ebt_do_table(ops->hooknum, skb, in, out,
-                           dev_net(in)->xt.frame_filter);
+       return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+                           dev_net(state->in)->xt.frame_filter);
 }
 
 static unsigned int
 ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-            const struct net_device *in, const struct net_device *out,
-            int (*okfn)(struct sk_buff *))
+            const struct nf_hook_state *state)
 {
-       return ebt_do_table(ops->hooknum, skb, in, out,
-                           dev_net(out)->xt.frame_filter);
+       return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+                           dev_net(state->out)->xt.frame_filter);
 }
 
 static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
index a0ac2984fb6c1e1864efdb4bb05150acbf57ba16..c5ef5b1ab6786814830983d76ef46c6fd0051f51 100644 (file)
@@ -58,20 +58,18 @@ static struct ebt_table frame_nat = {
 
 static unsigned int
 ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
-          const struct net_device *in, const struct net_device *out,
-          int (*okfn)(struct sk_buff *))
+          const struct nf_hook_state *state)
 {
-       return ebt_do_table(ops->hooknum, skb, in, out,
-                           dev_net(in)->xt.frame_nat);
+       return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+                           dev_net(state->in)->xt.frame_nat);
 }
 
 static unsigned int
 ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
-           const struct net_device *in, const struct net_device *out,
-           int (*okfn)(struct sk_buff *))
+           const struct nf_hook_state *state)
 {
-       return ebt_do_table(ops->hooknum, skb, in, out,
-                           dev_net(out)->xt.frame_nat);
+       return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+                           dev_net(state->out)->xt.frame_nat);
 }
 
 static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
index 19473a9371b8a65ed6b1e8727a5c2b321862886b..a343e62442b1304eca4e23abeb2a3df92283552e 100644 (file)
@@ -67,47 +67,43 @@ EXPORT_SYMBOL_GPL(nft_bridge_ip6hdr_validate);
 static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
                                               const struct nf_hook_ops *ops,
                                               struct sk_buff *skb,
-                                              const struct net_device *in,
-                                              const struct net_device *out)
+                                              const struct nf_hook_state *state)
 {
        if (nft_bridge_iphdr_validate(skb))
-               nft_set_pktinfo_ipv4(pkt, ops, skb, in, out);
+               nft_set_pktinfo_ipv4(pkt, ops, skb, state);
        else
-               nft_set_pktinfo(pkt, ops, skb, in, out);
+               nft_set_pktinfo(pkt, ops, skb, state);
 }
 
 static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
-                                             const struct nf_hook_ops *ops,
-                                             struct sk_buff *skb,
-                                             const struct net_device *in,
-                                             const struct net_device *out)
+                                              const struct nf_hook_ops *ops,
+                                              struct sk_buff *skb,
+                                              const struct nf_hook_state *state)
 {
 #if IS_ENABLED(CONFIG_IPV6)
        if (nft_bridge_ip6hdr_validate(skb) &&
-           nft_set_pktinfo_ipv6(pkt, ops, skb, in, out) == 0)
+           nft_set_pktinfo_ipv6(pkt, ops, skb, state) == 0)
                return;
 #endif
-       nft_set_pktinfo(pkt, ops, skb, in, out);
+       nft_set_pktinfo(pkt, ops, skb, state);
 }
 
 static unsigned int
 nft_do_chain_bridge(const struct nf_hook_ops *ops,
                    struct sk_buff *skb,
-                   const struct net_device *in,
-                   const struct net_device *out,
-                   int (*okfn)(struct sk_buff *))
+                   const struct nf_hook_state *state)
 {
        struct nft_pktinfo pkt;
 
        switch (eth_hdr(skb)->h_proto) {
        case htons(ETH_P_IP):
-               nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+               nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, state);
                break;
        case htons(ETH_P_IPV6):
-               nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
+               nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, state);
                break;
        default:
-               nft_set_pktinfo(&pkt, ops, skb, in, out);
+               nft_set_pktinfo(&pkt, ops, skb, state);
                break;
        }
 
index 3244aead09267dd77b0392a0aac7afa462593305..54a2fdf0f4574a4db6ba23193a511ffcf09293da 100644 (file)
@@ -21,6 +21,7 @@
 #include <net/ip.h>
 #include <net/ip6_checksum.h>
 #include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv6.h>
 #include "../br_private.h"
 
 static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
@@ -36,7 +37,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
        skb_pull(nskb, ETH_HLEN);
 }
 
-static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
+/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
+ * or the bridge port (NF_BRIDGE PREROUTING).
+ */
+static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
+                                           const struct net_device *dev,
+                                           int hook)
 {
        struct sk_buff *nskb;
        struct iphdr *niph;
@@ -65,11 +71,12 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
 
        nft_reject_br_push_etherhdr(oldskb, nskb);
 
-       br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+       br_deliver(br_port_get_rcu(dev), nskb);
 }
 
-static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
-                                         u8 code)
+static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
+                                         const struct net_device *dev,
+                                         int hook, u8 code)
 {
        struct sk_buff *nskb;
        struct iphdr *niph;
@@ -77,8 +84,9 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
        unsigned int len;
        void *payload;
        __wsum csum;
+       u8 proto;
 
-       if (!nft_bridge_iphdr_validate(oldskb))
+       if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
                return;
 
        /* IP header checks: fragment. */
@@ -91,7 +99,17 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
        if (!pskb_may_pull(oldskb, len))
                return;
 
-       if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
+       if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
+               return;
+
+       if (ip_hdr(oldskb)->protocol == IPPROTO_TCP ||
+           ip_hdr(oldskb)->protocol == IPPROTO_UDP)
+               proto = ip_hdr(oldskb)->protocol;
+       else
+               proto = 0;
+
+       if (!skb_csum_unnecessary(oldskb) &&
+           nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
                return;
 
        nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
@@ -120,11 +138,13 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
 
        nft_reject_br_push_etherhdr(oldskb, nskb);
 
-       br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+       br_deliver(br_port_get_rcu(dev), nskb);
 }
 
 static void nft_reject_br_send_v6_tcp_reset(struct net *net,
-                                           struct sk_buff *oldskb, int hook)
+                                           struct sk_buff *oldskb,
+                                           const struct net_device *dev,
+                                           int hook)
 {
        struct sk_buff *nskb;
        const struct tcphdr *oth;
@@ -152,12 +172,37 @@ static void nft_reject_br_send_v6_tcp_reset(struct net *net,
 
        nft_reject_br_push_etherhdr(oldskb, nskb);
 
-       br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+       br_deliver(br_port_get_rcu(dev), nskb);
+}
+
+static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
+{
+       const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+       int thoff;
+       __be16 fo;
+       u8 proto = ip6h->nexthdr;
+
+       if (skb->csum_bad)
+               return false;
+
+       if (skb_csum_unnecessary(skb))
+               return true;
+
+       if (ip6h->payload_len &&
+           pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
+               return false;
+
+       thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
+       if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
+               return false;
+
+       return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
 }
 
 static void nft_reject_br_send_v6_unreach(struct net *net,
-                                         struct sk_buff *oldskb, int hook,
-                                         u8 code)
+                                         struct sk_buff *oldskb,
+                                         const struct net_device *dev,
+                                         int hook, u8 code)
 {
        struct sk_buff *nskb;
        struct ipv6hdr *nip6h;
@@ -176,6 +221,9 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
        if (!pskb_may_pull(oldskb, len))
                return;
 
+       if (!reject6_br_csum_ok(oldskb, hook))
+               return;
+
        nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
                         LL_MAX_HEADER + len, GFP_ATOMIC);
        if (!nskb)
@@ -205,7 +253,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
 
        nft_reject_br_push_etherhdr(oldskb, nskb);
 
-       br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+       br_deliver(br_port_get_rcu(dev), nskb);
 }
 
 static void nft_reject_bridge_eval(const struct nft_expr *expr,
@@ -224,16 +272,16 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
        case htons(ETH_P_IP):
                switch (priv->type) {
                case NFT_REJECT_ICMP_UNREACH:
-                       nft_reject_br_send_v4_unreach(pkt->skb,
+                       nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
                                                      pkt->ops->hooknum,
                                                      priv->icmp_code);
                        break;
                case NFT_REJECT_TCP_RST:
-                       nft_reject_br_send_v4_tcp_reset(pkt->skb,
+                       nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in,
                                                        pkt->ops->hooknum);
                        break;
                case NFT_REJECT_ICMPX_UNREACH:
-                       nft_reject_br_send_v4_unreach(pkt->skb,
+                       nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
                                                      pkt->ops->hooknum,
                                                      nft_reject_icmp_code(priv->icmp_code));
                        break;
@@ -242,16 +290,16 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
        case htons(ETH_P_IPV6):
                switch (priv->type) {
                case NFT_REJECT_ICMP_UNREACH:
-                       nft_reject_br_send_v6_unreach(net, pkt->skb,
+                       nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in,
                                                      pkt->ops->hooknum,
                                                      priv->icmp_code);
                        break;
                case NFT_REJECT_TCP_RST:
-                       nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
+                       nft_reject_br_send_v6_tcp_reset(net, pkt->skb, pkt->in,
                                                        pkt->ops->hooknum);
                        break;
                case NFT_REJECT_ICMPX_UNREACH:
-                       nft_reject_br_send_v6_unreach(net, pkt->skb,
+                       nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in,
                                                      pkt->ops->hooknum,
                                                      nft_reject_icmpv6_code(priv->icmp_code));
                        break;
index a6e2da0bc7184501ed5eb2bcef0e5169be2cf276..4ec0c803aef112196657503cd615fe7a83e800bb 100644 (file)
@@ -271,8 +271,8 @@ static void caif_check_flow_release(struct sock *sk)
  * Copied from unix_dgram_recvmsg, but removed credit checks,
  * changed locking, address handling and added MSG_TRUNC.
  */
-static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
-                              struct msghdr *m, size_t len, int flags)
+static int caif_seqpkt_recvmsg(struct socket *sock, struct msghdr *m,
+                              size_t len, int flags)
 
 {
        struct sock *sk = sock->sk;
@@ -343,9 +343,8 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
  * Copied from unix_stream_recvmsg, but removed credit checks,
  * changed locking calls, changed address handling.
  */
-static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
-                              struct msghdr *msg, size_t size,
-                              int flags)
+static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+                              size_t size, int flags)
 {
        struct sock *sk = sock->sk;
        int copied = 0;
@@ -511,8 +510,8 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
 }
 
 /* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
-static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
-                              struct msghdr *msg, size_t len)
+static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg,
+                              size_t len)
 {
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
@@ -586,8 +585,8 @@ err:
  * Changed removed permission handling and added waiting for flow on
  * and other minor adaptations.
  */
-static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
-                              struct msghdr *msg, size_t len)
+static int caif_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+                              size_t len)
 {
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
index ee9ffd9565526eb0336fba37e17f0a31b2dd3c34..b523453585be7f56f1e04eb97977b2eb4f90dadb 100644 (file)
@@ -328,7 +328,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
         *  containing the interface index.
         */
 
-       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
+       sock_skb_cb_check_size(sizeof(struct sockaddr_can));
        addr = (struct sockaddr_can *)skb->cb;
        memset(addr, 0, sizeof(*addr));
        addr->can_family  = AF_CAN;
@@ -1231,8 +1231,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
 /*
  * bcm_sendmsg - process BCM commands (opcodes) from the userspace
  */
-static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
-                      struct msghdr *msg, size_t size)
+static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 {
        struct sock *sk = sock->sk;
        struct bcm_sock *bo = bcm_sk(sk);
@@ -1535,8 +1534,8 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
        return 0;
 }
 
-static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
-                      struct msghdr *msg, size_t size, int flags)
+static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                      int flags)
 {
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
index 00c13ef23661bd92133fff7db45588d3bd83fc62..31b9748cbb4ec6c1caa9ddc9e3a80609077a81eb 100644 (file)
@@ -74,6 +74,12 @@ MODULE_ALIAS("can-proto-1");
  * storing the single filter in dfilter, to avoid using dynamic memory.
  */
 
+struct uniqframe {
+       ktime_t tstamp;
+       const struct sk_buff *skb;
+       unsigned int join_rx_count;
+};
+
 struct raw_sock {
        struct sock sk;
        int bound;
@@ -82,10 +88,12 @@ struct raw_sock {
        int loopback;
        int recv_own_msgs;
        int fd_frames;
+       int join_filters;
        int count;                 /* number of active filters */
        struct can_filter dfilter; /* default/single filter */
        struct can_filter *filter; /* pointer to filter(s) */
        can_err_mask_t err_mask;
+       struct uniqframe __percpu *uniq;
 };
 
 /*
@@ -95,8 +103,8 @@ struct raw_sock {
  */
 static inline unsigned int *raw_flags(struct sk_buff *skb)
 {
-       BUILD_BUG_ON(sizeof(skb->cb) <= (sizeof(struct sockaddr_can) +
-                                        sizeof(unsigned int)));
+       sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
+                              sizeof(unsigned int));
 
        /* return pointer after struct sockaddr_can */
        return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
@@ -123,6 +131,26 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
        if (!ro->fd_frames && oskb->len != CAN_MTU)
                return;
 
+       /* eliminate multiple filter matches for the same skb */
+       if (this_cpu_ptr(ro->uniq)->skb == oskb &&
+           ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) {
+               if (ro->join_filters) {
+                       this_cpu_inc(ro->uniq->join_rx_count);
+                       /* drop frame until all enabled filters matched */
+                       if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
+                               return;
+               } else {
+                       return;
+               }
+       } else {
+               this_cpu_ptr(ro->uniq)->skb = oskb;
+               this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp;
+               this_cpu_ptr(ro->uniq)->join_rx_count = 1;
+               /* drop first frame to check all enabled filters? */
+               if (ro->join_filters && ro->count > 1)
+                       return;
+       }
+
        /* clone the given skb to be able to enqueue it into the rcv queue */
        skb = skb_clone(oskb, GFP_ATOMIC);
        if (!skb)
@@ -135,7 +163,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
         *  containing the interface index.
         */
 
-       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
+       sock_skb_cb_check_size(sizeof(struct sockaddr_can));
        addr = (struct sockaddr_can *)skb->cb;
        memset(addr, 0, sizeof(*addr));
        addr->can_family  = AF_CAN;
@@ -296,6 +324,12 @@ static int raw_init(struct sock *sk)
        ro->loopback         = 1;
        ro->recv_own_msgs    = 0;
        ro->fd_frames        = 0;
+       ro->join_filters     = 0;
+
+       /* alloc_percpu provides zero'ed memory */
+       ro->uniq = alloc_percpu(struct uniqframe);
+       if (unlikely(!ro->uniq))
+               return -ENOMEM;
 
        /* set notifier */
        ro->notifier.notifier_call = raw_notifier;
@@ -339,6 +373,7 @@ static int raw_release(struct socket *sock)
        ro->ifindex = 0;
        ro->bound   = 0;
        ro->count   = 0;
+       free_percpu(ro->uniq);
 
        sock_orphan(sk);
        sock->sk = NULL;
@@ -583,6 +618,15 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 
                break;
 
+       case CAN_RAW_JOIN_FILTERS:
+               if (optlen != sizeof(ro->join_filters))
+                       return -EINVAL;
+
+               if (copy_from_user(&ro->join_filters, optval, optlen))
+                       return -EFAULT;
+
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
@@ -647,6 +691,12 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
                val = &ro->fd_frames;
                break;
 
+       case CAN_RAW_JOIN_FILTERS:
+               if (len > sizeof(int))
+                       len = sizeof(int);
+               val = &ro->join_filters;
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
@@ -658,8 +708,7 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
        return 0;
 }
 
-static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
-                      struct msghdr *msg, size_t size)
+static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 {
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);
@@ -728,8 +777,8 @@ send_failed:
        return err;
 }
 
-static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
-                      struct msghdr *msg, size_t size, int flags)
+static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                      int flags)
 {
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
index f7bd286a82807148bed32622e5de2f975450c877..5cfd26a0006f07d15ee62a5660e0177745f454cf 100644 (file)
 #include <asm/uaccess.h>
 #include <net/compat.h>
 
-ssize_t get_compat_msghdr(struct msghdr *kmsg,
-                         struct compat_msghdr __user *umsg,
-                         struct sockaddr __user **save_addr,
-                         struct iovec **iov)
+int get_compat_msghdr(struct msghdr *kmsg,
+                     struct compat_msghdr __user *umsg,
+                     struct sockaddr __user **save_addr,
+                     struct iovec **iov)
 {
        compat_uptr_t uaddr, uiov, tmp3;
        compat_size_t nr_segs;
@@ -79,13 +79,11 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
        if (nr_segs > UIO_MAXIOV)
                return -EMSGSIZE;
 
-       err = compat_rw_copy_check_uvector(save_addr ? READ : WRITE,
-                                          compat_ptr(uiov), nr_segs,
-                                          UIO_FASTIOV, *iov, iov);
-       if (err >= 0)
-               iov_iter_init(&kmsg->msg_iter, save_addr ? READ : WRITE,
-                             *iov, nr_segs, err);
-       return err;
+       kmsg->msg_iocb = NULL;
+
+       return compat_import_iovec(save_addr ? READ : WRITE,
+                                  compat_ptr(uiov), nr_segs,
+                                  UIO_FASTIOV, iov, &kmsg->msg_iter);
 }
 
 /* Bleech... */
@@ -515,25 +513,25 @@ COMPAT_SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
 struct compat_group_req {
        __u32                            gr_interface;
        struct __kernel_sockaddr_storage gr_group
-               __attribute__ ((aligned(4)));
+               __aligned(4);
 } __packed;
 
 struct compat_group_source_req {
        __u32                            gsr_interface;
        struct __kernel_sockaddr_storage gsr_group
-               __attribute__ ((aligned(4)));
+               __aligned(4);
        struct __kernel_sockaddr_storage gsr_source
-               __attribute__ ((aligned(4)));
+               __aligned(4);
 } __packed;
 
 struct compat_group_filter {
        __u32                            gf_interface;
        struct __kernel_sockaddr_storage gf_group
-               __attribute__ ((aligned(4)));
+               __aligned(4);
        __u32                            gf_fmode;
        __u32                            gf_numsrc;
        struct __kernel_sockaddr_storage gf_slist[1]
-               __attribute__ ((aligned(4)));
+               __aligned(4);
 } __packed;
 
 #define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \
index df493d68330c03d1cb5b59e40d31294d7f45b3f8..b80fb91bb3f7e8dc630663cb5e012dc97ac6924f 100644 (file)
@@ -673,7 +673,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
        if (!chunk)
                return 0;
 
-       if (iov_iter_count(&msg->msg_iter) < chunk) {
+       if (msg_data_left(msg) < chunk) {
                if (__skb_checksum_complete(skb))
                        goto csum_error;
                if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
index 962ee9d719641291853715f366717bf1626e115c..3b3965288f52764daf082dede116a851bff4fc9e 100644 (file)
@@ -659,6 +659,27 @@ __setup("netdev=", netdev_boot_setup);
 
 *******************************************************************************/
 
+/**
+ *     dev_get_iflink  - get 'iflink' value of a interface
+ *     @dev: targeted interface
+ *
+ *     Indicates the ifindex the interface is linked to.
+ *     Physical interfaces have the same 'ifindex' and 'iflink' values.
+ */
+
+int dev_get_iflink(const struct net_device *dev)
+{
+       if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
+               return dev->netdev_ops->ndo_get_iflink(dev);
+
+       /* If dev->rtnl_link_ops is set, it's a virtual interface. */
+       if (dev->rtnl_link_ops)
+               return 0;
+
+       return dev->ifindex;
+}
+EXPORT_SYMBOL(dev_get_iflink);
+
 /**
  *     __dev_get_by_name       - find a device by its name
  *     @net: the applicable net namespace
@@ -1385,7 +1406,7 @@ static int __dev_close(struct net_device *dev)
        return retval;
 }
 
-static int dev_close_many(struct list_head *head)
+int dev_close_many(struct list_head *head, bool unlink)
 {
        struct net_device *dev, *tmp;
 
@@ -1399,11 +1420,13 @@ static int dev_close_many(struct list_head *head)
        list_for_each_entry_safe(dev, tmp, head, close_list) {
                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
                call_netdevice_notifiers(NETDEV_DOWN, dev);
-               list_del_init(&dev->close_list);
+               if (unlink)
+                       list_del_init(&dev->close_list);
        }
 
        return 0;
 }
+EXPORT_SYMBOL(dev_close_many);
 
 /**
  *     dev_close - shutdown an interface.
@@ -1420,7 +1443,7 @@ int dev_close(struct net_device *dev)
                LIST_HEAD(single);
 
                list_add(&dev->close_list, &single);
-               dev_close_many(&single);
+               dev_close_many(&single, true);
                list_del(&single);
        }
        return 0;
@@ -1694,6 +1717,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
        }
 
        skb_scrub_packet(skb, true);
+       skb->priority = 0;
        skb->protocol = eth_type_trans(skb, dev);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
@@ -1737,7 +1761,8 @@ static inline int deliver_skb(struct sk_buff *skb,
 
 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
                                          struct packet_type **pt,
-                                         struct net_device *dev, __be16 type,
+                                         struct net_device *orig_dev,
+                                         __be16 type,
                                          struct list_head *ptype_list)
 {
        struct packet_type *ptype, *pt_prev = *pt;
@@ -1746,7 +1771,7 @@ static inline void deliver_ptype_list_skb(struct sk_buff *skb,
                if (ptype->type != type)
                        continue;
                if (pt_prev)
-                       deliver_skb(skb, pt_prev, dev);
+                       deliver_skb(skb, pt_prev, orig_dev);
                pt_prev = ptype;
        }
        *pt = pt_prev;
@@ -2559,12 +2584,26 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
        return features;
 }
 
+netdev_features_t passthru_features_check(struct sk_buff *skb,
+                                         struct net_device *dev,
+                                         netdev_features_t features)
+{
+       return features;
+}
+EXPORT_SYMBOL(passthru_features_check);
+
+static netdev_features_t dflt_features_check(const struct sk_buff *skb,
+                                            struct net_device *dev,
+                                            netdev_features_t features)
+{
+       return vlan_features_check(skb, features);
+}
+
 netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
        netdev_features_t features = dev->features;
        u16 gso_segs = skb_shinfo(skb)->gso_segs;
-       __be16 protocol = skb->protocol;
 
        if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
                features &= ~NETIF_F_GSO_MASK;
@@ -2576,34 +2615,17 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
        if (skb->encapsulation)
                features &= dev->hw_enc_features;
 
-       if (!skb_vlan_tag_present(skb)) {
-               if (unlikely(protocol == htons(ETH_P_8021Q) ||
-                            protocol == htons(ETH_P_8021AD))) {
-                       struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-                       protocol = veh->h_vlan_encapsulated_proto;
-               } else {
-                       goto finalize;
-               }
-       }
-
-       features = netdev_intersect_features(features,
-                                            dev->vlan_features |
-                                            NETIF_F_HW_VLAN_CTAG_TX |
-                                            NETIF_F_HW_VLAN_STAG_TX);
-
-       if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
+       if (skb_vlan_tagged(skb))
                features = netdev_intersect_features(features,
-                                                    NETIF_F_SG |
-                                                    NETIF_F_HIGHDMA |
-                                                    NETIF_F_FRAGLIST |
-                                                    NETIF_F_GEN_CSUM |
+                                                    dev->vlan_features |
                                                     NETIF_F_HW_VLAN_CTAG_TX |
                                                     NETIF_F_HW_VLAN_STAG_TX);
 
-finalize:
        if (dev->netdev_ops->ndo_features_check)
                features &= dev->netdev_ops->ndo_features_check(skb, dev,
                                                                features);
+       else
+               features &= dflt_features_check(skb, dev, features);
 
        return harmonize_features(skb, features);
 }
@@ -2848,7 +2870,9 @@ static void skb_update_prio(struct sk_buff *skb)
 #define skb_update_prio(skb)
 #endif
 
-static DEFINE_PER_CPU(int, xmit_recursion);
+DEFINE_PER_CPU(int, xmit_recursion);
+EXPORT_SYMBOL(xmit_recursion);
+
 #define RECURSION_LIMIT 10
 
 /**
@@ -5911,6 +5935,24 @@ int dev_get_phys_port_id(struct net_device *dev,
 }
 EXPORT_SYMBOL(dev_get_phys_port_id);
 
+/**
+ *     dev_get_phys_port_name - Get device physical port name
+ *     @dev: device
+ *     @name: port name
+ *
+ *     Get device physical port name
+ */
+int dev_get_phys_port_name(struct net_device *dev,
+                          char *name, size_t len)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       if (!ops->ndo_get_phys_port_name)
+               return -EOPNOTSUPP;
+       return ops->ndo_get_phys_port_name(dev, name, len);
+}
+EXPORT_SYMBOL(dev_get_phys_port_name);
+
 /**
  *     dev_new_index   -       allocate an ifindex
  *     @net: the applicable net namespace
@@ -5968,7 +6010,7 @@ static void rollback_registered_many(struct list_head *head)
        /* If device is running, close it first. */
        list_for_each_entry(dev, head, unreg_list)
                list_add_tail(&dev->close_list, &close_head);
-       dev_close_many(&close_head);
+       dev_close_many(&close_head, true);
 
        list_for_each_entry(dev, head, unreg_list) {
                /* And unlink it from device chain. */
@@ -6295,8 +6337,6 @@ int register_netdevice(struct net_device *dev)
        spin_lock_init(&dev->addr_list_lock);
        netdev_set_addr_lockdep_class(dev);
 
-       dev->iflink = -1;
-
        ret = dev_get_valid_name(net, dev, dev->name);
        if (ret < 0)
                goto out;
@@ -6326,9 +6366,6 @@ int register_netdevice(struct net_device *dev)
        else if (__dev_get_by_index(net, dev->ifindex))
                goto err_uninit;
 
-       if (dev->iflink == -1)
-               dev->iflink = dev->ifindex;
-
        /* Transfer changeable features to wanted_features and enable
         * software offloads (GSO and GRO).
         */
@@ -6841,8 +6878,6 @@ void free_netdev(struct net_device *dev)
 {
        struct napi_struct *p, *n;
 
-       release_net(dev_net(dev));
-
        netif_free_tx_queues(dev);
 #ifdef CONFIG_SYSFS
        kvfree(dev->_rx);
@@ -7043,12 +7078,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        dev_net_set(dev, net);
 
        /* If there is an ifindex conflict assign a new one */
-       if (__dev_get_by_index(net, dev->ifindex)) {
-               int iflink = (dev->iflink == dev->ifindex);
+       if (__dev_get_by_index(net, dev->ifindex))
                dev->ifindex = dev_new_index(net);
-               if (iflink)
-                       dev->iflink = dev->ifindex;
-       }
 
        /* Send a netdev-add uevent to the new namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
index aa378ecef1860d0c1e255c001aef9528b8198d6f..1d00b89229024b45fef3955cd27221fafe2bfb74 100644 (file)
@@ -790,7 +790,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
        if (ops->get_rxfh_indir_size)
                dev_indir_size = ops->get_rxfh_indir_size(dev);
        if (ops->get_rxfh_key_size)
-               dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev);
+               dev_key_size = ops->get_rxfh_key_size(dev);
 
        if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
                return -EFAULT;
index 44706e81b2e03df3e9d39c1cd76879a4ede48d1e..9a12668f7d62720c6ca18f09d13c45ea3e2ca2b2 100644 (file)
@@ -31,7 +31,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
        r->pref = pref;
        r->table = table;
        r->flags = flags;
-       r->fr_net = hold_net(ops->fro_net);
+       r->fr_net = ops->fro_net;
 
        r->suppress_prefixlen = -1;
        r->suppress_ifgroup = -1;
@@ -116,7 +116,6 @@ static int __fib_rules_register(struct fib_rules_ops *ops)
                if (ops->family == o->family)
                        goto errout;
 
-       hold_net(net);
        list_add_tail_rcu(&ops->list, &net->rules_ops);
        err = 0;
 errout:
@@ -160,25 +159,16 @@ static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
        }
 }
 
-static void fib_rules_put_rcu(struct rcu_head *head)
-{
-       struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
-       struct net *net = ops->fro_net;
-
-       release_net(net);
-       kfree(ops);
-}
-
 void fib_rules_unregister(struct fib_rules_ops *ops)
 {
        struct net *net = ops->fro_net;
 
        spin_lock(&net->rules_mod_lock);
        list_del_rcu(&ops->list);
-       fib_rules_cleanup_ops(ops);
        spin_unlock(&net->rules_mod_lock);
 
-       call_rcu(&ops->rcu, fib_rules_put_rcu);
+       fib_rules_cleanup_ops(ops);
+       kfree_rcu(ops, rcu);
 }
 EXPORT_SYMBOL_GPL(fib_rules_unregister);
 
@@ -303,7 +293,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
                err = -ENOMEM;
                goto errout;
        }
-       rule->fr_net = hold_net(net);
+       rule->fr_net = net;
 
        if (tb[FRA_PRIORITY])
                rule->pref = nla_get_u32(tb[FRA_PRIORITY]);
@@ -423,7 +413,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
        return 0;
 
 errout_free:
-       release_net(rule->fr_net);
        kfree(rule);
 errout:
        rules_ops_put(ops);
@@ -492,6 +481,12 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
                        goto errout;
                }
 
+               if (ops->delete) {
+                       err = ops->delete(rule);
+                       if (err)
+                               goto errout;
+               }
+
                list_del_rcu(&rule->list);
 
                if (rule->action == FR_ACT_GOTO) {
@@ -517,8 +512,6 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
 
                notify_rule_change(RTM_DELRULE, rule, ops, nlh,
                                   NETLINK_CB(skb).portid);
-               if (ops->delete)
-                       ops->delete(rule);
                fib_rule_put(rule);
                flush_route_cache(ops);
                rules_ops_put(ops);
index f6bdc2b1ba01295a53be71b4043437a82848d1c6..b669e75d2b3624fb54935fc41b6d5df3d53aabe4 100644 (file)
@@ -150,10 +150,62 @@ static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
        return prandom_u32();
 }
 
+static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
+                             struct bpf_insn *insn_buf)
+{
+       struct bpf_insn *insn = insn_buf;
+
+       switch (skb_field) {
+       case SKF_AD_MARK:
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+
+               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, mark));
+               break;
+
+       case SKF_AD_PKTTYPE:
+               *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
+               *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
+#ifdef __BIG_ENDIAN_BITFIELD
+               *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
+#endif
+               break;
+
+       case SKF_AD_QUEUE:
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
+
+               *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, queue_mapping));
+               break;
+
+       case SKF_AD_VLAN_TAG:
+       case SKF_AD_VLAN_TAG_PRESENT:
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+               BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+
+               /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
+               *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, vlan_tci));
+               if (skb_field == SKF_AD_VLAN_TAG) {
+                       *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
+                                               ~VLAN_TAG_PRESENT);
+               } else {
+                       /* dst_reg >>= 12 */
+                       *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
+                       /* dst_reg &= 1 */
+                       *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
+               }
+               break;
+       }
+
+       return insn - insn_buf;
+}
+
 static bool convert_bpf_extensions(struct sock_filter *fp,
                                   struct bpf_insn **insnp)
 {
        struct bpf_insn *insn = *insnp;
+       u32 cnt;
 
        switch (fp->k) {
        case SKF_AD_OFF + SKF_AD_PROTOCOL:
@@ -167,13 +219,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
                break;
 
        case SKF_AD_OFF + SKF_AD_PKTTYPE:
-               *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
-                                     PKT_TYPE_OFFSET());
-               *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
-#ifdef __BIG_ENDIAN_BITFIELD
-               insn++;
-                *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
-#endif
+               cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
+               insn += cnt - 1;
                break;
 
        case SKF_AD_OFF + SKF_AD_IFINDEX:
@@ -197,10 +244,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
                break;
 
        case SKF_AD_OFF + SKF_AD_MARK:
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
-
-               *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
-                                   offsetof(struct sk_buff, mark));
+               cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
+               insn += cnt - 1;
                break;
 
        case SKF_AD_OFF + SKF_AD_RXHASH:
@@ -211,29 +256,30 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
                break;
 
        case SKF_AD_OFF + SKF_AD_QUEUE:
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
-
-               *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
-                                   offsetof(struct sk_buff, queue_mapping));
+               cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
+               insn += cnt - 1;
                break;
 
        case SKF_AD_OFF + SKF_AD_VLAN_TAG:
+               cnt = convert_skb_access(SKF_AD_VLAN_TAG,
+                                        BPF_REG_A, BPF_REG_CTX, insn);
+               insn += cnt - 1;
+               break;
+
        case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
-               BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+               cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
+                                        BPF_REG_A, BPF_REG_CTX, insn);
+               insn += cnt - 1;
+               break;
+
+       case SKF_AD_OFF + SKF_AD_VLAN_TPID:
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
 
-               /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
+               /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
                *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
-                                     offsetof(struct sk_buff, vlan_tci));
-               if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
-                       *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
-                                             ~VLAN_TAG_PRESENT);
-               } else {
-                       /* A >>= 12 */
-                       *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
-                       /* A &= 1 */
-                       *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
-               }
+                                     offsetof(struct sk_buff, vlan_proto));
+               /* A = ntohs(A) [emitting a nop or swap16] */
+               *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
                break;
 
        case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
@@ -814,7 +860,7 @@ static void bpf_release_orig_filter(struct bpf_prog *fp)
 
 static void __bpf_prog_release(struct bpf_prog *prog)
 {
-       if (prog->aux->prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
+       if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
                bpf_prog_put(prog);
        } else {
                bpf_release_orig_filter(prog);
@@ -1019,6 +1065,32 @@ void bpf_prog_destroy(struct bpf_prog *fp)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_destroy);
 
+static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
+{
+       struct sk_filter *fp, *old_fp;
+
+       fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+       if (!fp)
+               return -ENOMEM;
+
+       fp->prog = prog;
+       atomic_set(&fp->refcnt, 0);
+
+       if (!sk_filter_charge(sk, fp)) {
+               kfree(fp);
+               return -ENOMEM;
+       }
+
+       old_fp = rcu_dereference_protected(sk->sk_filter,
+                                          sock_owned_by_user(sk));
+       rcu_assign_pointer(sk->sk_filter, fp);
+
+       if (old_fp)
+               sk_filter_uncharge(sk, old_fp);
+
+       return 0;
+}
+
 /**
  *     sk_attach_filter - attach a socket filter
  *     @fprog: the filter program
@@ -1031,7 +1103,6 @@ EXPORT_SYMBOL_GPL(bpf_prog_destroy);
  */
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
-       struct sk_filter *fp, *old_fp;
        unsigned int fsize = bpf_classic_proglen(fprog);
        unsigned int bpf_fsize = bpf_prog_size(fprog->len);
        struct bpf_prog *prog;
@@ -1068,36 +1139,20 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
-       fp = kmalloc(sizeof(*fp), GFP_KERNEL);
-       if (!fp) {
+       err = __sk_attach_prog(prog, sk);
+       if (err < 0) {
                __bpf_prog_release(prog);
-               return -ENOMEM;
-       }
-       fp->prog = prog;
-
-       atomic_set(&fp->refcnt, 0);
-
-       if (!sk_filter_charge(sk, fp)) {
-               __sk_filter_release(fp);
-               return -ENOMEM;
+               return err;
        }
 
-       old_fp = rcu_dereference_protected(sk->sk_filter,
-                                          sock_owned_by_user(sk));
-       rcu_assign_pointer(sk->sk_filter, fp);
-
-       if (old_fp)
-               sk_filter_uncharge(sk, old_fp);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(sk_attach_filter);
 
-#ifdef CONFIG_BPF_SYSCALL
 int sk_attach_bpf(u32 ufd, struct sock *sk)
 {
-       struct sk_filter *fp, *old_fp;
        struct bpf_prog *prog;
+       int err;
 
        if (sock_flag(sk, SOCK_FILTER_LOCKED))
                return -EPERM;
@@ -1106,40 +1161,168 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
-       if (prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) {
-               /* valid fd, but invalid program type */
+       if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
                bpf_prog_put(prog);
                return -EINVAL;
        }
 
-       fp = kmalloc(sizeof(*fp), GFP_KERNEL);
-       if (!fp) {
+       err = __sk_attach_prog(prog, sk);
+       if (err < 0) {
                bpf_prog_put(prog);
-               return -ENOMEM;
+               return err;
        }
-       fp->prog = prog;
 
-       atomic_set(&fp->refcnt, 0);
+       return 0;
+}
 
-       if (!sk_filter_charge(sk, fp)) {
-               __sk_filter_release(fp);
-               return -ENOMEM;
+#define BPF_RECOMPUTE_CSUM(flags)      ((flags) & 1)
+
+static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       unsigned int offset = (unsigned int) r2;
+       void *from = (void *) (long) r3;
+       unsigned int len = (unsigned int) r4;
+       char buf[16];
+       void *ptr;
+
+       /* bpf verifier guarantees that:
+        * 'from' pointer points to bpf program stack
+        * 'len' bytes of it were initialized
+        * 'len' > 0
+        * 'skb' is a valid pointer to 'struct sk_buff'
+        *
+        * so check for invalid 'offset' and too large 'len'
+        */
+       if (unlikely(offset > 0xffff || len > sizeof(buf)))
+               return -EFAULT;
+
+       if (skb_cloned(skb) && !skb_clone_writable(skb, offset + len))
+               return -EFAULT;
+
+       ptr = skb_header_pointer(skb, offset, len, buf);
+       if (unlikely(!ptr))
+               return -EFAULT;
+
+       if (BPF_RECOMPUTE_CSUM(flags))
+               skb_postpull_rcsum(skb, ptr, len);
+
+       memcpy(ptr, from, len);
+
+       if (ptr == buf)
+               /* skb_store_bits cannot return -EFAULT here */
+               skb_store_bits(skb, offset, ptr, len);
+
+       if (BPF_RECOMPUTE_CSUM(flags) && skb->ip_summed == CHECKSUM_COMPLETE)
+               skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0));
+       return 0;
+}
+
+const struct bpf_func_proto bpf_skb_store_bytes_proto = {
+       .func           = bpf_skb_store_bytes,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_PTR_TO_STACK,
+       .arg4_type      = ARG_CONST_STACK_SIZE,
+       .arg5_type      = ARG_ANYTHING,
+};
+
+#define BPF_HEADER_FIELD_SIZE(flags)   ((flags) & 0x0f)
+#define BPF_IS_PSEUDO_HEADER(flags)    ((flags) & 0x10)
+
+static u64 bpf_l3_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       __sum16 sum, *ptr;
+
+       if (unlikely(offset > 0xffff))
+               return -EFAULT;
+
+       if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum)))
+               return -EFAULT;
+
+       ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
+       if (unlikely(!ptr))
+               return -EFAULT;
+
+       switch (BPF_HEADER_FIELD_SIZE(flags)) {
+       case 2:
+               csum_replace2(ptr, from, to);
+               break;
+       case 4:
+               csum_replace4(ptr, from, to);
+               break;
+       default:
+               return -EINVAL;
        }
 
-       old_fp = rcu_dereference_protected(sk->sk_filter,
-                                          sock_owned_by_user(sk));
-       rcu_assign_pointer(sk->sk_filter, fp);
+       if (ptr == &sum)
+               /* skb_store_bits guaranteed to not return -EFAULT here */
+               skb_store_bits(skb, offset, ptr, sizeof(sum));
 
-       if (old_fp)
-               sk_filter_uncharge(sk, old_fp);
+       return 0;
+}
+
+const struct bpf_func_proto bpf_l3_csum_replace_proto = {
+       .func           = bpf_l3_csum_replace,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_ANYTHING,
+       .arg5_type      = ARG_ANYTHING,
+};
+
+static u64 bpf_l4_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
+       __sum16 sum, *ptr;
+
+       if (unlikely(offset > 0xffff))
+               return -EFAULT;
+
+       if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum)))
+               return -EFAULT;
+
+       ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
+       if (unlikely(!ptr))
+               return -EFAULT;
+
+       switch (BPF_HEADER_FIELD_SIZE(flags)) {
+       case 2:
+               inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
+               break;
+       case 4:
+               inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (ptr == &sum)
+               /* skb_store_bits guaranteed to not return -EFAULT here */
+               skb_store_bits(skb, offset, ptr, sizeof(sum));
 
        return 0;
 }
 
-/* allow socket filters to call
- * bpf_map_lookup_elem(), bpf_map_update_elem(), bpf_map_delete_elem()
- */
-static const struct bpf_func_proto *sock_filter_func_proto(enum bpf_func_id func_id)
+const struct bpf_func_proto bpf_l4_csum_replace_proto = {
+       .func           = bpf_l4_csum_replace,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_ANYTHING,
+       .arg5_type      = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *
+sk_filter_func_proto(enum bpf_func_id func_id)
 {
        switch (func_id) {
        case BPF_FUNC_map_lookup_elem:
@@ -1148,39 +1331,144 @@ static const struct bpf_func_proto *sock_filter_func_proto(enum bpf_func_id func
                return &bpf_map_update_elem_proto;
        case BPF_FUNC_map_delete_elem:
                return &bpf_map_delete_elem_proto;
+       case BPF_FUNC_get_prandom_u32:
+               return &bpf_get_prandom_u32_proto;
+       case BPF_FUNC_get_smp_processor_id:
+               return &bpf_get_smp_processor_id_proto;
        default:
                return NULL;
        }
 }
 
-static bool sock_filter_is_valid_access(int off, int size, enum bpf_access_type type)
+static const struct bpf_func_proto *
+tc_cls_act_func_proto(enum bpf_func_id func_id)
 {
-       /* skb fields cannot be accessed yet */
-       return false;
+       switch (func_id) {
+       case BPF_FUNC_skb_store_bytes:
+               return &bpf_skb_store_bytes_proto;
+       case BPF_FUNC_l3_csum_replace:
+               return &bpf_l3_csum_replace_proto;
+       case BPF_FUNC_l4_csum_replace:
+               return &bpf_l4_csum_replace_proto;
+       default:
+               return sk_filter_func_proto(func_id);
+       }
+}
+
+static bool sk_filter_is_valid_access(int off, int size,
+                                     enum bpf_access_type type)
+{
+       /* only read is allowed */
+       if (type != BPF_READ)
+               return false;
+
+       /* check bounds */
+       if (off < 0 || off >= sizeof(struct __sk_buff))
+               return false;
+
+       /* disallow misaligned access */
+       if (off % size != 0)
+               return false;
+
+       /* all __sk_buff fields are __u32 */
+       if (size != 4)
+               return false;
+
+       return true;
+}
+
+static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
+                                       struct bpf_insn *insn_buf)
+{
+       struct bpf_insn *insn = insn_buf;
+
+       switch (ctx_off) {
+       case offsetof(struct __sk_buff, len):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
+
+               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, len));
+               break;
+
+       case offsetof(struct __sk_buff, protocol):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
+
+               *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, protocol));
+               break;
+
+       case offsetof(struct __sk_buff, vlan_proto):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
+
+               *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, vlan_proto));
+               break;
+
+       case offsetof(struct __sk_buff, priority):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
+
+               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, priority));
+               break;
+
+       case offsetof(struct __sk_buff, mark):
+               return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn);
+
+       case offsetof(struct __sk_buff, pkt_type):
+               return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);
+
+       case offsetof(struct __sk_buff, queue_mapping):
+               return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn);
+
+       case offsetof(struct __sk_buff, vlan_present):
+               return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
+                                         dst_reg, src_reg, insn);
+
+       case offsetof(struct __sk_buff, vlan_tci):
+               return convert_skb_access(SKF_AD_VLAN_TAG,
+                                         dst_reg, src_reg, insn);
+       }
+
+       return insn - insn_buf;
 }
 
-static struct bpf_verifier_ops sock_filter_ops = {
-       .get_func_proto = sock_filter_func_proto,
-       .is_valid_access = sock_filter_is_valid_access,
+static const struct bpf_verifier_ops sk_filter_ops = {
+       .get_func_proto = sk_filter_func_proto,
+       .is_valid_access = sk_filter_is_valid_access,
+       .convert_ctx_access = sk_filter_convert_ctx_access,
+};
+
+static const struct bpf_verifier_ops tc_cls_act_ops = {
+       .get_func_proto = tc_cls_act_func_proto,
+       .is_valid_access = sk_filter_is_valid_access,
+       .convert_ctx_access = sk_filter_convert_ctx_access,
 };
 
-static struct bpf_prog_type_list tl = {
-       .ops = &sock_filter_ops,
+static struct bpf_prog_type_list sk_filter_type __read_mostly = {
+       .ops = &sk_filter_ops,
        .type = BPF_PROG_TYPE_SOCKET_FILTER,
 };
 
-static int __init register_sock_filter_ops(void)
+static struct bpf_prog_type_list sched_cls_type __read_mostly = {
+       .ops = &tc_cls_act_ops,
+       .type = BPF_PROG_TYPE_SCHED_CLS,
+};
+
+static struct bpf_prog_type_list sched_act_type __read_mostly = {
+       .ops = &tc_cls_act_ops,
+       .type = BPF_PROG_TYPE_SCHED_ACT,
+};
+
+static int __init register_sk_filter_ops(void)
 {
-       bpf_register_prog_type(&tl);
+       bpf_register_prog_type(&sk_filter_type);
+       bpf_register_prog_type(&sched_cls_type);
+       bpf_register_prog_type(&sched_act_type);
+
        return 0;
 }
-late_initcall(register_sock_filter_ops);
-#else
-int sk_attach_bpf(u32 ufd, struct sock *sk)
-{
-       return -EOPNOTSUPP;
-}
-#endif
+late_initcall(register_sk_filter_ops);
+
 int sk_detach_filter(struct sock *sk)
 {
        int ret = -ENOENT;
index 49a9e3e06c085dbcb545e766c96186fba2dac45a..982861607f883e15f8c81921b72330985349bef9 100644 (file)
@@ -40,7 +40,7 @@ static DEFINE_SPINLOCK(lweventlist_lock);
 static unsigned char default_operstate(const struct net_device *dev)
 {
        if (!netif_carrier_ok(dev))
-               return (dev->ifindex != dev->iflink ?
+               return (dev->ifindex != dev_get_iflink(dev) ?
                        IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);
 
        if (netif_dormant(dev))
@@ -89,7 +89,7 @@ static bool linkwatch_urgent_event(struct net_device *dev)
        if (!netif_running(dev))
                return false;
 
-       if (dev->ifindex != dev->iflink)
+       if (dev->ifindex != dev_get_iflink(dev))
                return true;
 
        if (dev->priv_flags & IFF_TEAM_PORT)
index 70fe9e10ac867f495086810dc6ea619f69d59368..3de6542560288b3896ab243879a7b4a9b098ca0d 100644 (file)
@@ -397,25 +397,15 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
 {
        struct neighbour *n;
-       int key_len = tbl->key_len;
-       u32 hash_val;
-       struct neigh_hash_table *nht;
 
        NEIGH_CACHE_STAT_INC(tbl, lookups);
 
        rcu_read_lock_bh();
-       nht = rcu_dereference_bh(tbl->nht);
-       hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
-
-       for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
-            n != NULL;
-            n = rcu_dereference_bh(n->next)) {
-               if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
-                       if (!atomic_inc_not_zero(&n->refcnt))
-                               n = NULL;
-                       NEIGH_CACHE_STAT_INC(tbl, hits);
-                       break;
-               }
+       n = __neigh_lookup_noref(tbl, pkey, dev);
+       if (n) {
+               if (!atomic_inc_not_zero(&n->refcnt))
+                       n = NULL;
+               NEIGH_CACHE_STAT_INC(tbl, hits);
        }
 
        rcu_read_unlock_bh();
@@ -601,7 +591,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
        if (!n)
                goto out;
 
-       write_pnet(&n->net, hold_net(net));
+       write_pnet(&n->net, net);
        memcpy(n->key, pkey, key_len);
        n->dev = dev;
        if (dev)
@@ -610,7 +600,6 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
        if (tbl->pconstructor && tbl->pconstructor(n)) {
                if (dev)
                        dev_put(dev);
-               release_net(net);
                kfree(n);
                n = NULL;
                goto out;
@@ -644,7 +633,6 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
                                tbl->pdestructor(n);
                        if (n->dev)
                                dev_put(n->dev);
-                       release_net(pneigh_net(n));
                        kfree(n);
                        return 0;
                }
@@ -667,7 +655,6 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
                                        tbl->pdestructor(n);
                                if (n->dev)
                                        dev_put(n->dev);
-                               release_net(pneigh_net(n));
                                kfree(n);
                                continue;
                        }
@@ -830,10 +817,9 @@ out:
 static __inline__ int neigh_max_probes(struct neighbour *n)
 {
        struct neigh_parms *p = n->parms;
-       int max_probes = NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES);
-       if (!(n->nud_state & NUD_PROBE))
-               max_probes += NEIGH_VAR(p, MCAST_PROBES);
-       return max_probes;
+       return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
+              (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
+               NEIGH_VAR(p, MCAST_PROBES));
 }
 
 static void neigh_invalidate(struct neighbour *neigh)
@@ -1263,10 +1249,10 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl,
 EXPORT_SYMBOL(neigh_event_ns);
 
 /* called with read_lock_bh(&n->lock); */
-static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
+static void neigh_hh_init(struct neighbour *n)
 {
-       struct net_device *dev = dst->dev;
-       __be16 prot = dst->ops->protocol;
+       struct net_device *dev = n->dev;
+       __be16 prot = n->tbl->protocol;
        struct hh_cache *hh = &n->hh;
 
        write_lock_bh(&n->lock);
@@ -1280,43 +1266,19 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
        write_unlock_bh(&n->lock);
 }
 
-/* This function can be used in contexts, where only old dev_queue_xmit
- * worked, f.e. if you want to override normal output path (eql, shaper),
- * but resolution is not made yet.
- */
-
-int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-
-       __skb_pull(skb, skb_network_offset(skb));
-
-       if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
-                           skb->len) < 0 &&
-           dev_rebuild_header(skb))
-               return 0;
-
-       return dev_queue_xmit(skb);
-}
-EXPORT_SYMBOL(neigh_compat_output);
-
 /* Slow and careful. */
 
 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
 {
-       struct dst_entry *dst = skb_dst(skb);
        int rc = 0;
 
-       if (!dst)
-               goto discard;
-
        if (!neigh_event_send(neigh, skb)) {
                int err;
                struct net_device *dev = neigh->dev;
                unsigned int seq;
 
                if (dev->header_ops->cache && !neigh->hh.hh_len)
-                       neigh_hh_init(neigh, dst);
+                       neigh_hh_init(neigh);
 
                do {
                        __skb_pull(skb, skb_network_offset(skb));
@@ -1332,8 +1294,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
        }
 out:
        return rc;
-discard:
-       neigh_dbg(1, "%s: dst=%p neigh=%p\n", __func__, dst, neigh);
 out_kfree_skb:
        rc = -EINVAL;
        kfree_skb(skb);
@@ -1464,11 +1424,10 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                                neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
                dev_hold(dev);
                p->dev = dev;
-               write_pnet(&p->net, hold_net(net));
+               write_pnet(&p->net, net);
                p->sysctl_table = NULL;
 
                if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
-                       release_net(net);
                        dev_put(dev);
                        kfree(p);
                        return NULL;
@@ -1508,7 +1467,6 @@ EXPORT_SYMBOL(neigh_parms_release);
 
 static void neigh_parms_destroy(struct neigh_parms *parms)
 {
-       release_net(neigh_parms_net(parms));
        kfree(parms);
 }
 
@@ -1783,6 +1741,8 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
                        NEIGH_VAR(parms, UCAST_PROBES)) ||
            nla_put_u32(skb, NDTPA_MCAST_PROBES,
                        NEIGH_VAR(parms, MCAST_PROBES)) ||
+           nla_put_u32(skb, NDTPA_MCAST_REPROBES,
+                       NEIGH_VAR(parms, MCAST_REPROBES)) ||
            nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
            nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
                          NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
@@ -1942,6 +1902,7 @@ static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
        [NDTPA_APP_PROBES]              = { .type = NLA_U32 },
        [NDTPA_UCAST_PROBES]            = { .type = NLA_U32 },
        [NDTPA_MCAST_PROBES]            = { .type = NLA_U32 },
+       [NDTPA_MCAST_REPROBES]          = { .type = NLA_U32 },
        [NDTPA_BASE_REACHABLE_TIME]     = { .type = NLA_U64 },
        [NDTPA_GC_STALETIME]            = { .type = NLA_U64 },
        [NDTPA_DELAY_PROBE_TIME]        = { .type = NLA_U64 },
@@ -2042,6 +2003,10 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
                                NEIGH_VAR_SET(p, MCAST_PROBES,
                                              nla_get_u32(tbp[i]));
                                break;
+                       case NDTPA_MCAST_REPROBES:
+                               NEIGH_VAR_SET(p, MCAST_REPROBES,
+                                             nla_get_u32(tbp[i]));
+                               break;
                        case NDTPA_BASE_REACHABLE_TIME:
                                NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
                                              nla_get_msecs(tbp[i]));
@@ -2427,6 +2392,40 @@ void __neigh_for_each_release(struct neigh_table *tbl,
 }
 EXPORT_SYMBOL(__neigh_for_each_release);
 
+int neigh_xmit(int index, struct net_device *dev,
+              const void *addr, struct sk_buff *skb)
+{
+       int err = -EAFNOSUPPORT;
+       if (likely(index < NEIGH_NR_TABLES)) {
+               struct neigh_table *tbl;
+               struct neighbour *neigh;
+
+               tbl = neigh_tables[index];
+               if (!tbl)
+                       goto out;
+               neigh = __neigh_lookup_noref(tbl, addr, dev);
+               if (!neigh)
+                       neigh = __neigh_create(tbl, addr, dev, false);
+               err = PTR_ERR(neigh);
+               if (IS_ERR(neigh))
+                       goto out_kfree_skb;
+               err = neigh->output(neigh, skb);
+       }
+       else if (index == NEIGH_LINK_TABLE) {
+               err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+                                     addr, NULL, skb->len);
+               if (err < 0)
+                       goto out_kfree_skb;
+               err = dev_queue_xmit(skb);
+       }
+out:
+       return err;
+out_kfree_skb:
+       kfree_skb(skb);
+       goto out;
+}
+EXPORT_SYMBOL(neigh_xmit);
+
 #ifdef CONFIG_PROC_FS
 
 static struct neighbour *neigh_get_first(struct seq_file *seq)
@@ -2994,6 +2993,7 @@ static struct neigh_sysctl_table {
                NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
                NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
                NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
+               NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
                NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
                NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
                NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
index f2aa73bfb0e474f789b42e9a858080d3780ca426..4238d6da5c60dc7ac7def10fb4e3ddda0a9377e6 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/export.h>
 #include <linux/jiffies.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
 
 #include "net-sysfs.h"
 
@@ -108,11 +109,19 @@ NETDEVICE_SHOW_RO(dev_id, fmt_hex);
 NETDEVICE_SHOW_RO(dev_port, fmt_dec);
 NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
 NETDEVICE_SHOW_RO(addr_len, fmt_dec);
-NETDEVICE_SHOW_RO(iflink, fmt_dec);
 NETDEVICE_SHOW_RO(ifindex, fmt_dec);
 NETDEVICE_SHOW_RO(type, fmt_dec);
 NETDEVICE_SHOW_RO(link_mode, fmt_dec);
 
+static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
+                          char *buf)
+{
+       struct net_device *ndev = to_net_dev(dev);
+
+       return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
+}
+static DEVICE_ATTR_RO(iflink);
+
 static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
 {
        return sprintf(buf, fmt_dec, dev->name_assign_type);
@@ -417,6 +426,28 @@ static ssize_t phys_port_id_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(phys_port_id);
 
+static ssize_t phys_port_name_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct net_device *netdev = to_net_dev(dev);
+       ssize_t ret = -EINVAL;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       if (dev_isalive(netdev)) {
+               char name[IFNAMSIZ];
+
+               ret = dev_get_phys_port_name(netdev, name, sizeof(name));
+               if (!ret)
+                       ret = sprintf(buf, "%s\n", name);
+       }
+       rtnl_unlock();
+
+       return ret;
+}
+static DEVICE_ATTR_RO(phys_port_name);
+
 static ssize_t phys_switch_id_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
 {
@@ -464,6 +495,7 @@ static struct attribute *net_class_attrs[] = {
        &dev_attr_tx_queue_len.attr,
        &dev_attr_gro_flush_timeout.attr,
        &dev_attr_phys_port_id.attr,
+       &dev_attr_phys_port_name.attr,
        &dev_attr_phys_switch_id.attr,
        NULL,
 };
@@ -950,6 +982,60 @@ static ssize_t show_trans_timeout(struct netdev_queue *queue,
        return sprintf(buf, "%lu", trans_timeout);
 }
 
+#ifdef CONFIG_XPS
+static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+{
+       struct net_device *dev = queue->dev;
+       int i;
+
+       for (i = 0; i < dev->num_tx_queues; i++)
+               if (queue == &dev->_tx[i])
+                       break;
+
+       BUG_ON(i >= dev->num_tx_queues);
+
+       return i;
+}
+
+static ssize_t show_tx_maxrate(struct netdev_queue *queue,
+                              struct netdev_queue_attribute *attribute,
+                              char *buf)
+{
+       return sprintf(buf, "%lu\n", queue->tx_maxrate);
+}
+
+static ssize_t set_tx_maxrate(struct netdev_queue *queue,
+                             struct netdev_queue_attribute *attribute,
+                             const char *buf, size_t len)
+{
+       struct net_device *dev = queue->dev;
+       int err, index = get_netdev_queue_index(queue);
+       u32 rate = 0;
+
+       err = kstrtou32(buf, 10, &rate);
+       if (err < 0)
+               return err;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       err = -EOPNOTSUPP;
+       if (dev->netdev_ops->ndo_set_tx_maxrate)
+               err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);
+
+       rtnl_unlock();
+       if (!err) {
+               queue->tx_maxrate = rate;
+               return len;
+       }
+       return err;
+}
+
+static struct netdev_queue_attribute queue_tx_maxrate =
+       __ATTR(tx_maxrate, S_IRUGO | S_IWUSR,
+              show_tx_maxrate, set_tx_maxrate);
+#endif
+
 static struct netdev_queue_attribute queue_trans_timeout =
        __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
 
@@ -1064,18 +1150,6 @@ static struct attribute_group dql_group = {
 #endif /* CONFIG_BQL */
 
 #ifdef CONFIG_XPS
-static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
-{
-       struct net_device *dev = queue->dev;
-       unsigned int i;
-
-       i = queue - dev->_tx;
-       BUG_ON(i >= dev->num_tx_queues);
-
-       return i;
-}
-
-
 static ssize_t show_xps_map(struct netdev_queue *queue,
                            struct netdev_queue_attribute *attribute, char *buf)
 {
@@ -1152,6 +1226,7 @@ static struct attribute *netdev_queue_default_attrs[] = {
        &queue_trans_timeout.attr,
 #ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
+       &queue_tx_maxrate.attr,
 #endif
        NULL
 };
@@ -1374,6 +1449,30 @@ static struct class net_class = {
        .namespace = net_namespace,
 };
 
+#ifdef CONFIG_OF_NET
+static int of_dev_node_match(struct device *dev, const void *data)
+{
+       int ret = 0;
+
+       if (dev->parent)
+               ret = dev->parent->of_node == data;
+
+       return ret == 0 ? dev->of_node == data : ret;
+}
+
+struct net_device *of_find_net_device_by_node(struct device_node *np)
+{
+       struct device *dev;
+
+       dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
+       if (!dev)
+               return NULL;
+
+       return to_net_dev(dev);
+}
+EXPORT_SYMBOL(of_find_net_device_by_node);
+#endif
+
 /* Delete sysfs entries but hold kobject reference until after all
  * netdev references are gone.
  */
index 5221f975a4cc313bd622ecbe9bba263a4faff4a1..e7345d9031df18f1b52d87a8f2b501d0e76e1e4a 100644 (file)
@@ -198,8 +198,10 @@ static int __peernet2id(struct net *net, struct net *peer, bool alloc)
  */
 int peernet2id(struct net *net, struct net *peer)
 {
-       int id = __peernet2id(net, peer, true);
+       bool alloc = atomic_read(&peer->count) == 0 ? false : true;
+       int id;
 
+       id = __peernet2id(net, peer, alloc);
        return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
 }
 EXPORT_SYMBOL(peernet2id);
@@ -236,10 +238,6 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);
 
-#ifdef NETNS_REFCNT_DEBUG
-       atomic_set(&net->use_count, 0);
-#endif
-
        list_for_each_entry(ops, &pernet_list, list) {
                error = ops_init(ops, net);
                if (error < 0)
@@ -294,13 +292,6 @@ out_free:
 
 static void net_free(struct net *net)
 {
-#ifdef NETNS_REFCNT_DEBUG
-       if (unlikely(atomic_read(&net->use_count) != 0)) {
-               pr_emerg("network namespace not free! Usage: %d\n",
-                        atomic_read(&net->use_count));
-               return;
-       }
-#endif
        kfree(rcu_access_pointer(net->gen));
        kmem_cache_free(net_cachep, net);
 }
@@ -349,7 +340,7 @@ static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
 static void cleanup_net(struct work_struct *work)
 {
        const struct pernet_operations *ops;
-       struct net *net, *tmp, *peer;
+       struct net *net, *tmp;
        struct list_head net_kill_list;
        LIST_HEAD(net_exit_list);
 
@@ -365,6 +356,14 @@ static void cleanup_net(struct work_struct *work)
        list_for_each_entry(net, &net_kill_list, cleanup_list) {
                list_del_rcu(&net->list);
                list_add_tail(&net->exit_list, &net_exit_list);
+               for_each_net(tmp) {
+                       int id = __peernet2id(tmp, net, false);
+
+                       if (id >= 0)
+                               idr_remove(&tmp->netns_ids, id);
+               }
+               idr_destroy(&net->netns_ids);
+
        }
        rtnl_unlock();
 
@@ -390,26 +389,12 @@ static void cleanup_net(struct work_struct *work)
         */
        rcu_barrier();
 
-       rtnl_lock();
        /* Finally it is safe to free my network namespace structure */
        list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
-               /* Unreference net from all peers (no need to loop over
-                * net_exit_list because idr_destroy() will be called for each
-                * element of this list.
-                */
-               for_each_net(peer) {
-                       int id = __peernet2id(peer, net, false);
-
-                       if (id >= 0)
-                               idr_remove(&peer->netns_ids, id);
-               }
-               idr_destroy(&net->netns_ids);
-
                list_del_init(&net->exit_list);
                put_user_ns(net->user_ns);
                net_drop_ns(net);
        }
-       rtnl_unlock();
 }
 static DECLARE_WORK(net_cleanup_work, cleanup_net);
 
index 04db318e6218d93100bd07c12bebc38e2bc1f138..87b22c0bc08c2f33fa31948b8b2604f48b8009bc 100644 (file)
@@ -58,14 +58,14 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
                return -ENOMEM;
 
        get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
-       rwlock_init(&queue->syn_wait_lock);
+       spin_lock_init(&queue->syn_wait_lock);
        queue->rskq_accept_head = NULL;
        lopt->nr_table_entries = nr_table_entries;
        lopt->max_qlen_log = ilog2(nr_table_entries);
 
-       write_lock_bh(&queue->syn_wait_lock);
+       spin_lock_bh(&queue->syn_wait_lock);
        queue->listen_opt = lopt;
-       write_unlock_bh(&queue->syn_wait_lock);
+       spin_unlock_bh(&queue->syn_wait_lock);
 
        return 0;
 }
@@ -81,10 +81,10 @@ static inline struct listen_sock *reqsk_queue_yank_listen_sk(
 {
        struct listen_sock *lopt;
 
-       write_lock_bh(&queue->syn_wait_lock);
+       spin_lock_bh(&queue->syn_wait_lock);
        lopt = queue->listen_opt;
        queue->listen_opt = NULL;
-       write_unlock_bh(&queue->syn_wait_lock);
+       spin_unlock_bh(&queue->syn_wait_lock);
 
        return lopt;
 }
@@ -94,21 +94,26 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
        /* make all the listen_opt local to us */
        struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
 
-       if (lopt->qlen != 0) {
+       if (listen_sock_qlen(lopt) != 0) {
                unsigned int i;
 
                for (i = 0; i < lopt->nr_table_entries; i++) {
                        struct request_sock *req;
 
+                       spin_lock_bh(&queue->syn_wait_lock);
                        while ((req = lopt->syn_table[i]) != NULL) {
                                lopt->syn_table[i] = req->dl_next;
-                               lopt->qlen--;
-                               reqsk_free(req);
+                               atomic_inc(&lopt->qlen_dec);
+                               if (del_timer(&req->rsk_timer))
+                                       reqsk_put(req);
+                               reqsk_put(req);
                        }
+                       spin_unlock_bh(&queue->syn_wait_lock);
                }
        }
 
-       WARN_ON(lopt->qlen != 0);
+       if (WARN_ON(listen_sock_qlen(lopt) != 0))
+               pr_err("qlen %u\n", listen_sock_qlen(lopt));
        kvfree(lopt);
 }
 
@@ -153,24 +158,22 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
  * case might also exist in tcp_v4_hnd_req() that will trigger this locking
  * order.
  *
- * When a TFO req is created, it needs to sock_hold its listener to prevent
- * the latter data structure from going away.
- *
- * This function also sets "treq->listener" to NULL and unreference listener
- * socket. treq->listener is used by the listener so it is protected by the
+ * This function also sets "treq->tfo_listener" to false.
+ * treq->tfo_listener is used by the listener so it is protected by the
  * fastopenq->lock in this function.
  */
 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
                           bool reset)
 {
-       struct sock *lsk = tcp_rsk(req)->listener;
-       struct fastopen_queue *fastopenq =
-           inet_csk(lsk)->icsk_accept_queue.fastopenq;
+       struct sock *lsk = req->rsk_listener;
+       struct fastopen_queue *fastopenq;
+
+       fastopenq = inet_csk(lsk)->icsk_accept_queue.fastopenq;
 
        tcp_sk(sk)->fastopen_rsk = NULL;
        spin_lock_bh(&fastopenq->lock);
        fastopenq->qlen--;
-       tcp_rsk(req)->listener = NULL;
+       tcp_rsk(req)->tfo_listener = false;
        if (req->sk)    /* the child socket hasn't been accepted yet */
                goto out;
 
@@ -179,8 +182,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
                 * special RST handling below.
                 */
                spin_unlock_bh(&fastopenq->lock);
-               sock_put(lsk);
-               reqsk_free(req);
+               reqsk_put(req);
                return;
        }
        /* Wait for 60secs before removing a req that has triggered RST.
@@ -190,7 +192,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
         *
         * For more details see CoNext'11 "TCP Fast Open" paper.
         */
-       req->expires = jiffies + 60*HZ;
+       req->rsk_timer.expires = jiffies + 60*HZ;
        if (fastopenq->rskq_rst_head == NULL)
                fastopenq->rskq_rst_head = req;
        else
@@ -201,5 +203,4 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
        fastopenq->qlen++;
 out:
        spin_unlock_bh(&fastopenq->lock);
-       sock_put(lsk);
 }
index 7ebed55b5f7d1b2d1faacea1a49e7f9e947f43d1..5e02260b087f60942a03b37d16b081c58c65badd 100644 (file)
@@ -982,6 +982,24 @@ static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
        return 0;
 }
 
+static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
+{
+       char name[IFNAMSIZ];
+       int err;
+
+       err = dev_get_phys_port_name(dev, name, sizeof(name));
+       if (err) {
+               if (err == -EOPNOTSUPP)
+                       return 0;
+               return err;
+       }
+
+       if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
 {
        int err;
@@ -1037,8 +1055,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 #ifdef CONFIG_RPS
            nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
 #endif
-           (dev->ifindex != dev->iflink &&
-            nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
+           (dev->ifindex != dev_get_iflink(dev) &&
+            nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
            (upper_dev &&
             nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
            nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
@@ -1072,6 +1090,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
        if (rtnl_phys_port_id_fill(skb, dev))
                goto nla_put_failure;
 
+       if (rtnl_phys_port_name_fill(skb, dev))
+               goto nla_put_failure;
+
        if (rtnl_phys_switch_id_fill(skb, dev))
                goto nla_put_failure;
 
@@ -1815,6 +1836,42 @@ errout:
        return err;
 }
 
+static int rtnl_group_dellink(const struct net *net, int group)
+{
+       struct net_device *dev, *aux;
+       LIST_HEAD(list_kill);
+       bool found = false;
+
+       if (!group)
+               return -EPERM;
+
+       for_each_netdev(net, dev) {
+               if (dev->group == group) {
+                       const struct rtnl_link_ops *ops;
+
+                       found = true;
+                       ops = dev->rtnl_link_ops;
+                       if (!ops || !ops->dellink)
+                               return -EOPNOTSUPP;
+               }
+       }
+
+       if (!found)
+               return -ENODEV;
+
+       for_each_netdev_safe(net, dev, aux) {
+               if (dev->group == group) {
+                       const struct rtnl_link_ops *ops;
+
+                       ops = dev->rtnl_link_ops;
+                       ops->dellink(dev, &list_kill);
+               }
+       }
+       unregister_netdevice_many(&list_kill);
+
+       return 0;
+}
+
 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(skb->sk);
@@ -1838,6 +1895,8 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
                dev = __dev_get_by_index(net, ifm->ifi_index);
        else if (tb[IFLA_IFNAME])
                dev = __dev_get_by_name(net, ifname);
+       else if (tb[IFLA_GROUP])
+               return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));
        else
                return -EINVAL;
 
@@ -2804,8 +2863,8 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
             nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
            (dev->addr_len &&
             nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
-           (dev->ifindex != dev->iflink &&
-            nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+           (dev->ifindex != dev_get_iflink(dev) &&
+            nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
                goto nla_put_failure;
 
        br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
index 8e4ac97c84775f15c786020ff87a2ef22a8dbac8..cdb939b731aad72f039381d7b41183f1f6d94de5 100644 (file)
@@ -2865,7 +2865,6 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
  * @from: search offset
  * @to: search limit
  * @config: textsearch configuration
- * @state: uninitialized textsearch state variable
  *
  * Finds a pattern in the skb data according to the specified
  * textsearch configuration. Use textsearch_next() to retrieve
@@ -2873,17 +2872,17 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
  * to the first occurrence or UINT_MAX if no match was found.
  */
 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
-                          unsigned int to, struct ts_config *config,
-                          struct ts_state *state)
+                          unsigned int to, struct ts_config *config)
 {
+       struct ts_state state;
        unsigned int ret;
 
        config->get_next_block = skb_ts_get_next_block;
        config->finish = skb_ts_finish;
 
-       skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
+       skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
 
-       ret = textsearch_find(config, state);
+       ret = textsearch_find(config, &state);
        return (ret <= to - from ? ret : UINT_MAX);
 }
 EXPORT_SYMBOL(skb_find_text);
@@ -3207,10 +3206,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
        unsigned int offset = skb_gro_offset(skb);
        unsigned int headlen = skb_headlen(skb);
-       struct sk_buff *nskb, *lp, *p = *head;
        unsigned int len = skb_gro_len(skb);
+       struct sk_buff *lp, *p = *head;
        unsigned int delta_truesize;
-       unsigned int headroom;
 
        if (unlikely(p->len + len >= 65536))
                return -E2BIG;
@@ -3277,48 +3275,6 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
        }
-       /* switch back to head shinfo */
-       pinfo = skb_shinfo(p);
-
-       if (pinfo->frag_list)
-               goto merge;
-       if (skb_gro_len(p) != pinfo->gso_size)
-               return -E2BIG;
-
-       headroom = skb_headroom(p);
-       nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
-       if (unlikely(!nskb))
-               return -ENOMEM;
-
-       __copy_skb_header(nskb, p);
-       nskb->mac_len = p->mac_len;
-
-       skb_reserve(nskb, headroom);
-       __skb_put(nskb, skb_gro_offset(p));
-
-       skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
-       skb_set_network_header(nskb, skb_network_offset(p));
-       skb_set_transport_header(nskb, skb_transport_offset(p));
-
-       __skb_pull(p, skb_gro_offset(p));
-       memcpy(skb_mac_header(nskb), skb_mac_header(p),
-              p->data - skb_mac_header(p));
-
-       skb_shinfo(nskb)->frag_list = p;
-       skb_shinfo(nskb)->gso_size = pinfo->gso_size;
-       pinfo->gso_size = 0;
-       __skb_header_release(p);
-       NAPI_GRO_CB(nskb)->last = p;
-
-       nskb->data_len += p->len;
-       nskb->truesize += p->truesize;
-       nskb->len += p->len;
-
-       *head = nskb;
-       nskb->next = p->next;
-       p->next = NULL;
-
-       p = nskb;
 
 merge:
        delta_truesize = skb->truesize;
index 78e89eb7eb705624d3ff63324f5002ae10b51145..654e38a9975948f981f35716e7b9eac8569f0f93 100644 (file)
@@ -466,7 +466,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        skb_dst_force(skb);
 
        spin_lock_irqsave(&list->lock, flags);
-       skb->dropcount = atomic_read(&sk->sk_drops);
+       sock_skb_set_dropcount(sk, skb);
        __skb_queue_tail(list, skb);
        spin_unlock_irqrestore(&list->lock, flags);
 
@@ -653,6 +653,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
                sock_reset_flag(sk, bit);
 }
 
+bool sk_mc_loop(struct sock *sk)
+{
+       if (dev_recursion_level())
+               return false;
+       if (!sk)
+               return true;
+       switch (sk->sk_family) {
+       case AF_INET:
+               return inet_sk(sk)->mc_loop;
+#if IS_ENABLED(CONFIG_IPV6)
+       case AF_INET6:
+               return inet6_sk(sk)->mc_loop;
+#endif
+       }
+       WARN_ON(1);
+       return true;
+}
+EXPORT_SYMBOL(sk_mc_loop);
+
 /*
  *     This is meant for all protocols to use and covers goings on
  *     at the socket level. Everything here is generic.
@@ -928,8 +947,6 @@ set_rcvbuf:
                        sk->sk_mark = val;
                break;
 
-               /* We implement the SO_SNDLOWAT etc to
-                  not be settable (1003.1g 5.3) */
        case SO_RXQ_OVFL:
                sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
                break;
@@ -1234,6 +1251,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 
        default:
+               /* We implement the SO_SNDLOWAT etc to not be settable
+                * (1003.1g 7).
+                */
                return -ENOPROTOOPT;
        }
 
@@ -1454,9 +1474,8 @@ void sk_release_kernel(struct sock *sk)
                return;
 
        sock_hold(sk);
-       sock_release(sk->sk_socket);
-       release_net(sock_net(sk));
        sock_net_set(sk, get_net(&init_net));
+       sock_release(sk->sk_socket);
        sock_put(sk);
 }
 EXPORT_SYMBOL(sk_release_kernel);
@@ -1538,6 +1557,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_err      = 0;
                newsk->sk_priority = 0;
                newsk->sk_incoming_cpu = raw_smp_processor_id();
+               atomic64_set(&newsk->sk_cookie, 0);
                /*
                 * Before updating sk_refcnt, we must commit prior changes to memory
                 * (Documentation/RCU/rculist_nulls.txt for details)
@@ -1665,19 +1685,6 @@ void sock_efree(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_efree);
 
-#ifdef CONFIG_INET
-void sock_edemux(struct sk_buff *skb)
-{
-       struct sock *sk = skb->sk;
-
-       if (sk->sk_state == TCP_TIME_WAIT)
-               inet_twsk_put(inet_twsk(sk));
-       else
-               sock_put(sk);
-}
-EXPORT_SYMBOL(sock_edemux);
-#endif
-
 kuid_t sock_i_uid(struct sock *sk)
 {
        kuid_t uid;
@@ -2167,15 +2174,14 @@ int sock_no_getsockopt(struct socket *sock, int level, int optname,
 }
 EXPORT_SYMBOL(sock_no_getsockopt);
 
-int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
-                   size_t len)
+int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
 {
        return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(sock_no_sendmsg);
 
-int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
-                   size_t len, int flags)
+int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
+                   int flags)
 {
        return -EOPNOTSUPP;
 }
@@ -2547,14 +2553,14 @@ int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
 EXPORT_SYMBOL(compat_sock_common_getsockopt);
 #endif
 
-int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *msg, size_t size, int flags)
+int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                       int flags)
 {
        struct sock *sk = sock->sk;
        int addr_len = 0;
        int err;
 
-       err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+       err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
                                   flags & ~MSG_DONTWAIT, &addr_len);
        if (err >= 0)
                msg->msg_namelen = addr_len;
@@ -2731,6 +2737,42 @@ static inline void release_proto_idx(struct proto *prot)
 }
 #endif
 
+static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
+{
+       if (!rsk_prot)
+               return;
+       kfree(rsk_prot->slab_name);
+       rsk_prot->slab_name = NULL;
+       if (rsk_prot->slab) {
+               kmem_cache_destroy(rsk_prot->slab);
+               rsk_prot->slab = NULL;
+       }
+}
+
+static int req_prot_init(const struct proto *prot)
+{
+       struct request_sock_ops *rsk_prot = prot->rsk_prot;
+
+       if (!rsk_prot)
+               return 0;
+
+       rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
+                                       prot->name);
+       if (!rsk_prot->slab_name)
+               return -ENOMEM;
+
+       rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
+                                          rsk_prot->obj_size, 0,
+                                          0, NULL);
+
+       if (!rsk_prot->slab) {
+               pr_crit("%s: Can't create request sock SLAB cache!\n",
+                       prot->name);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
 int proto_register(struct proto *prot, int alloc_slab)
 {
        if (alloc_slab) {
@@ -2744,21 +2786,8 @@ int proto_register(struct proto *prot, int alloc_slab)
                        goto out;
                }
 
-               if (prot->rsk_prot != NULL) {
-                       prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
-                       if (prot->rsk_prot->slab_name == NULL)
-                               goto out_free_sock_slab;
-
-                       prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
-                                                                prot->rsk_prot->obj_size, 0,
-                                                                SLAB_HWCACHE_ALIGN, NULL);
-
-                       if (prot->rsk_prot->slab == NULL) {
-                               pr_crit("%s: Can't create request sock SLAB cache!\n",
-                                       prot->name);
-                               goto out_free_request_sock_slab_name;
-                       }
-               }
+               if (req_prot_init(prot))
+                       goto out_free_request_sock_slab;
 
                if (prot->twsk_prot != NULL) {
                        prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
@@ -2787,14 +2816,8 @@ int proto_register(struct proto *prot, int alloc_slab)
 out_free_timewait_sock_slab_name:
        kfree(prot->twsk_prot->twsk_slab_name);
 out_free_request_sock_slab:
-       if (prot->rsk_prot && prot->rsk_prot->slab) {
-               kmem_cache_destroy(prot->rsk_prot->slab);
-               prot->rsk_prot->slab = NULL;
-       }
-out_free_request_sock_slab_name:
-       if (prot->rsk_prot)
-               kfree(prot->rsk_prot->slab_name);
-out_free_sock_slab:
+       req_prot_cleanup(prot->rsk_prot);
+
        kmem_cache_destroy(prot->slab);
        prot->slab = NULL;
 out:
@@ -2814,11 +2837,7 @@ void proto_unregister(struct proto *prot)
                prot->slab = NULL;
        }
 
-       if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
-               kmem_cache_destroy(prot->rsk_prot->slab);
-               kfree(prot->rsk_prot->slab_name);
-               prot->rsk_prot->slab = NULL;
-       }
+       req_prot_cleanup(prot->rsk_prot);
 
        if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
                kmem_cache_destroy(prot->twsk_prot->twsk_slab);
index ad704c757bb4a6e48c685fe0478ce6525b08a3ab..74dddf84adcdd7fea05ca93d94f97c6558917ae1 100644 (file)
@@ -13,22 +13,39 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
 static DEFINE_MUTEX(sock_diag_table_mutex);
 
-int sock_diag_check_cookie(void *sk, __u32 *cookie)
+static u64 sock_gen_cookie(struct sock *sk)
 {
-       if ((cookie[0] != INET_DIAG_NOCOOKIE ||
-            cookie[1] != INET_DIAG_NOCOOKIE) &&
-           ((u32)(unsigned long)sk != cookie[0] ||
-            (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
-               return -ESTALE;
-       else
+       while (1) {
+               u64 res = atomic64_read(&sk->sk_cookie);
+
+               if (res)
+                       return res;
+               res = atomic64_inc_return(&sock_net(sk)->cookie_gen);
+               atomic64_cmpxchg(&sk->sk_cookie, 0, res);
+       }
+}
+
+int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie)
+{
+       u64 res;
+
+       if (cookie[0] == INET_DIAG_NOCOOKIE && cookie[1] == INET_DIAG_NOCOOKIE)
                return 0;
+
+       res = sock_gen_cookie(sk);
+       if ((u32)res != cookie[0] || (u32)(res >> 32) != cookie[1])
+               return -ESTALE;
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
 
-void sock_diag_save_cookie(void *sk, __u32 *cookie)
+void sock_diag_save_cookie(struct sock *sk, __u32 *cookie)
 {
-       cookie[0] = (u32)(unsigned long)sk;
-       cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+       u64 res = sock_gen_cookie(sk);
+
+       cookie[0] = (u32)res;
+       cookie[1] = (u32)(res >> 32);
 }
 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
 
index 8ce351ffceb122568ae05587a0b1dc3544d476d7..95b6139d710c46825d1e43f825188d81fcb70f60 100644 (file)
@@ -24,7 +24,6 @@
 
 static int zero = 0;
 static int one = 1;
-static int ushort_max = USHRT_MAX;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
@@ -403,7 +402,6 @@ static struct ctl_table netns_core_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .extra1         = &zero,
-               .extra2         = &ushort_max,
                .proc_handler   = proc_dointvec_minmax
        },
        { }
index 93ea80196f0ec383cca46d28bf8c4c96d0310b25..5b21f6f88e9798b839a60a41c4e6ccda58bcf1f0 100644 (file)
@@ -177,6 +177,8 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
        [DCB_ATTR_IEEE_PFC]         = {.len = sizeof(struct ieee_pfc)},
        [DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
        [DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
+       [DCB_ATTR_IEEE_QCN]         = {.len = sizeof(struct ieee_qcn)},
+       [DCB_ATTR_IEEE_QCN_STATS]   = {.len = sizeof(struct ieee_qcn_stats)},
 };
 
 static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
@@ -1030,7 +1032,7 @@ nla_put_failure:
        return err;
 }
 
-/* Handle IEEE 802.1Qaz GET commands. */
+/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
 static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 {
        struct nlattr *ieee, *app;
@@ -1067,6 +1069,32 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
                }
        }
 
+       if (ops->ieee_getqcn) {
+               struct ieee_qcn qcn;
+
+               memset(&qcn, 0, sizeof(qcn));
+               err = ops->ieee_getqcn(netdev, &qcn);
+               if (!err) {
+                       err = nla_put(skb, DCB_ATTR_IEEE_QCN,
+                                     sizeof(qcn), &qcn);
+                       if (err)
+                               return -EMSGSIZE;
+               }
+       }
+
+       if (ops->ieee_getqcnstats) {
+               struct ieee_qcn_stats qcn_stats;
+
+               memset(&qcn_stats, 0, sizeof(qcn_stats));
+               err = ops->ieee_getqcnstats(netdev, &qcn_stats);
+               if (!err) {
+                       err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
+                                     sizeof(qcn_stats), &qcn_stats);
+                       if (err)
+                               return -EMSGSIZE;
+               }
+       }
+
        if (ops->ieee_getpfc) {
                struct ieee_pfc pfc;
                memset(&pfc, 0, sizeof(pfc));
@@ -1379,8 +1407,9 @@ int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
 }
 EXPORT_SYMBOL(dcbnl_cee_notify);
 
-/* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
- * be completed the entire msg is aborted and error value is returned.
+/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
+ * If any requested operation can not be completed
+ * the entire msg is aborted and error value is returned.
  * No attempt is made to reconcile the case where only part of the
  * cmd can be completed.
  */
@@ -1417,6 +1446,15 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
                        goto err;
        }
 
+       if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
+               struct ieee_qcn *qcn =
+                       nla_data(ieee[DCB_ATTR_IEEE_QCN]);
+
+               err = ops->ieee_setqcn(netdev, qcn);
+               if (err)
+                       goto err;
+       }
+
        if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
                struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
                err = ops->ieee_setpfc(netdev, pfc);
index e4c144fa706fd72d5e266ecfc142fac83b405ae8..bebc735f5afc0fd9993a2a6ddc4074dcaa5b1559 100644 (file)
@@ -280,8 +280,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
                                       struct request_sock *req,
                                       struct dst_entry *dst);
 struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
-                           struct request_sock *req,
-                           struct request_sock **prev);
+                           struct request_sock *req);
 
 int dccp_child_process(struct sock *parent, struct sock *child,
                       struct sk_buff *skb);
@@ -310,16 +309,15 @@ int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, unsigned int optlen);
 #endif
 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                size_t size);
-int dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
-                struct msghdr *msg, size_t len, int nonblock, int flags,
-                int *addr_len);
+int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+                int flags, int *addr_len);
 void dccp_shutdown(struct sock *sk, int how);
 int inet_dccp_listen(struct socket *sock, int backlog);
 unsigned int dccp_poll(struct file *file, struct socket *sock,
                       poll_table *wait);
 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+void dccp_req_err(struct sock *sk, u64 seq);
 
 struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb);
 int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
index 028fc43aacbd3a75e2bd87554aa2f3a582900324..5a45f8de5d99e5e20d93bd49843ed8e2717b2ddd 100644 (file)
@@ -49,13 +49,14 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 }
 
 static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
-               struct inet_diag_req_v2 *r, struct nlattr *bc)
+                          const struct inet_diag_req_v2 *r, struct nlattr *bc)
 {
        inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
 }
 
-static int dccp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
-               struct inet_diag_req_v2 *req)
+static int dccp_diag_dump_one(struct sk_buff *in_skb,
+                             const struct nlmsghdr *nlh,
+                             const struct inet_diag_req_v2 *req)
 {
        return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
 }
index e45b968613a449206767455431eeadf808d2253a..2b4f21d34df6819c134b590d8ddeecffe668aaf6 100644 (file)
@@ -89,10 +89,9 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        if (inet->inet_saddr == 0)
                inet->inet_saddr = fl4->saddr;
-       inet->inet_rcv_saddr = inet->inet_saddr;
-
+       sk_rcv_saddr_set(sk, inet->inet_saddr);
        inet->inet_dport = usin->sin_port;
-       inet->inet_daddr = daddr;
+       sk_daddr_set(sk, daddr);
 
        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
@@ -196,6 +195,32 @@ static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
                dst->ops->redirect(dst, sk, skb);
 }
 
+void dccp_req_err(struct sock *sk, u64 seq)
+       {
+       struct request_sock *req = inet_reqsk(sk);
+       struct net *net = sock_net(sk);
+
+       /*
+        * ICMPs are not backlogged, hence we cannot get an established
+        * socket here.
+        */
+       WARN_ON(req->sk);
+
+       if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
+               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               reqsk_put(req);
+       } else {
+               /*
+                * Still in RESPOND, just remove it silently.
+                * There is no good way to pass the error to the newly
+                * created socket, and POSIX does not want network
+                * errors returned from accept().
+                */
+               inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+       }
+}
+EXPORT_SYMBOL(dccp_req_err);
+
 /*
  * This routine is called by the ICMP module when it gets some sort of error
  * condition. If err < 0 then the socket should be closed and the error
@@ -228,10 +253,11 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
                return;
        }
 
-       sk = inet_lookup(net, &dccp_hashinfo,
-                       iph->daddr, dh->dccph_dport,
-                       iph->saddr, dh->dccph_sport, inet_iif(skb));
-       if (sk == NULL) {
+       sk = __inet_lookup_established(net, &dccp_hashinfo,
+                                      iph->daddr, dh->dccph_dport,
+                                      iph->saddr, ntohs(dh->dccph_sport),
+                                      inet_iif(skb));
+       if (!sk) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
@@ -240,6 +266,9 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
                inet_twsk_put(inet_twsk(sk));
                return;
        }
+       seq = dccp_hdr_seq(dh);
+       if (sk->sk_state == DCCP_NEW_SYN_RECV)
+               return dccp_req_err(sk, seq);
 
        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
@@ -252,7 +281,6 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
                goto out;
 
        dp = dccp_sk(sk);
-       seq = dccp_hdr_seq(dh);
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
            !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
@@ -289,35 +317,6 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
        }
 
        switch (sk->sk_state) {
-               struct request_sock *req , **prev;
-       case DCCP_LISTEN:
-               if (sock_owned_by_user(sk))
-                       goto out;
-               req = inet_csk_search_req(sk, &prev, dh->dccph_dport,
-                                         iph->daddr, iph->saddr);
-               if (!req)
-                       goto out;
-
-               /*
-                * ICMPs are not backlogged, hence we cannot get an established
-                * socket here.
-                */
-               WARN_ON(req->sk);
-
-               if (!between48(seq, dccp_rsk(req)->dreq_iss,
-                                   dccp_rsk(req)->dreq_gss)) {
-                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-                       goto out;
-               }
-               /*
-                * Still in RESPOND, just remove it silently.
-                * There is no good way to pass the error to the newly
-                * created socket, and POSIX does not want network
-                * errors returned from accept().
-                */
-               inet_csk_reqsk_queue_drop(sk, req, prev);
-               goto out;
-
        case DCCP_REQUESTING:
        case DCCP_RESPOND:
                if (!sock_owned_by_user(sk)) {
@@ -408,8 +407,8 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
 
        newinet            = inet_sk(newsk);
        ireq               = inet_rsk(req);
-       newinet->inet_daddr     = ireq->ir_rmt_addr;
-       newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+       sk_daddr_set(newsk, ireq->ir_rmt_addr);
+       sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
        newinet->inet_saddr     = ireq->ir_loc_addr;
        newinet->inet_opt       = ireq->opt;
        ireq->opt          = NULL;
@@ -449,14 +448,14 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        const struct dccp_hdr *dh = dccp_hdr(skb);
        const struct iphdr *iph = ip_hdr(skb);
        struct sock *nsk;
-       struct request_sock **prev;
        /* Find possible connection requests. */
-       struct request_sock *req = inet_csk_search_req(sk, &prev,
-                                                      dh->dccph_sport,
+       struct request_sock *req = inet_csk_search_req(sk, dh->dccph_sport,
                                                       iph->saddr, iph->daddr);
-       if (req != NULL)
-               return dccp_check_req(sk, skb, req, prev);
-
+       if (req) {
+               nsk = dccp_check_req(sk, skb, req);
+               reqsk_put(req);
+               return nsk;
+       }
        nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
                                      iph->saddr, dh->dccph_sport,
                                      iph->daddr, dh->dccph_dport,
@@ -575,7 +574,7 @@ static void dccp_v4_reqsk_destructor(struct request_sock *req)
        kfree(inet_rsk(req)->opt);
 }
 
-void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
+void dccp_syn_ack_timeout(const struct request_sock *req)
 {
 }
 EXPORT_SYMBOL(dccp_syn_ack_timeout);
@@ -624,7 +623,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;
 
-       req = inet_reqsk_alloc(&dccp_request_sock_ops);
+       req = inet_reqsk_alloc(&dccp_request_sock_ops, sk);
        if (req == NULL)
                goto drop;
 
@@ -639,8 +638,10 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop_and_free;
 
        ireq = inet_rsk(req);
-       ireq->ir_loc_addr = ip_hdr(skb)->daddr;
-       ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
+       sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+       sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+       ireq->ireq_family = AF_INET;
+       ireq->ir_iif = sk->sk_bound_dev_if;
 
        /*
         * Step 3: Process LISTEN state
index 6bcaa33cd804d2d18e569e921c8b43b2e3ae94e9..9d0551092c6cd73f3cfa30c89130bac69d693118 100644 (file)
 static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
 static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
 
-static void dccp_v6_hash(struct sock *sk)
-{
-       if (sk->sk_state != DCCP_CLOSED) {
-               if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
-                       inet_hash(sk);
-                       return;
-               }
-               local_bh_disable();
-               __inet6_hash(sk, NULL);
-               local_bh_enable();
-       }
-}
-
 /* add pseudo-header to DCCP checksum stored in skb->csum */
 static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
                                      const struct in6_addr *saddr,
@@ -98,11 +85,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                return;
        }
 
-       sk = inet6_lookup(net, &dccp_hashinfo,
-                       &hdr->daddr, dh->dccph_dport,
-                       &hdr->saddr, dh->dccph_sport, inet6_iif(skb));
+       sk = __inet6_lookup_established(net, &dccp_hashinfo,
+                                       &hdr->daddr, dh->dccph_dport,
+                                       &hdr->saddr, ntohs(dh->dccph_sport),
+                                       inet6_iif(skb));
 
-       if (sk == NULL) {
+       if (!sk) {
                ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
                                   ICMP6_MIB_INERRORS);
                return;
@@ -112,6 +100,9 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                inet_twsk_put(inet_twsk(sk));
                return;
        }
+       seq = dccp_hdr_seq(dh);
+       if (sk->sk_state == DCCP_NEW_SYN_RECV)
+               return dccp_req_err(sk, seq);
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
@@ -121,7 +112,6 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                goto out;
 
        dp = dccp_sk(sk);
-       seq = dccp_hdr_seq(dh);
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
            !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
@@ -162,32 +152,6 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        /* Might be for an request_sock */
        switch (sk->sk_state) {
-               struct request_sock *req, **prev;
-       case DCCP_LISTEN:
-               if (sock_owned_by_user(sk))
-                       goto out;
-
-               req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
-                                          &hdr->daddr, &hdr->saddr,
-                                          inet6_iif(skb));
-               if (req == NULL)
-                       goto out;
-
-               /*
-                * ICMPs are not backlogged, hence we cannot get an established
-                * socket here.
-                */
-               WARN_ON(req->sk != NULL);
-
-               if (!between48(seq, dccp_rsk(req)->dreq_iss,
-                                   dccp_rsk(req)->dreq_gss)) {
-                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-                       goto out;
-               }
-
-               inet_csk_reqsk_queue_drop(sk, req, prev);
-               goto out;
-
        case DCCP_REQUESTING:
        case DCCP_RESPOND:  /* Cannot happen.
                               It can, it SYNs are crossed. --ANK */
@@ -330,17 +294,16 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 {
        const struct dccp_hdr *dh = dccp_hdr(skb);
        const struct ipv6hdr *iph = ipv6_hdr(skb);
+       struct request_sock *req;
        struct sock *nsk;
-       struct request_sock **prev;
-       /* Find possible connection requests. */
-       struct request_sock *req = inet6_csk_search_req(sk, &prev,
-                                                       dh->dccph_sport,
-                                                       &iph->saddr,
-                                                       &iph->daddr,
-                                                       inet6_iif(skb));
-       if (req != NULL)
-               return dccp_check_req(sk, skb, req, prev);
 
+       req = inet6_csk_search_req(sk, dh->dccph_sport, &iph->saddr,
+                                  &iph->daddr, inet6_iif(skb));
+       if (req) {
+               nsk = dccp_check_req(sk, skb, req);
+               reqsk_put(req);
+               return nsk;
+       }
        nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
                                         &iph->saddr, dh->dccph_sport,
                                         &iph->daddr, ntohs(dh->dccph_dport),
@@ -386,7 +349,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;
 
-       req = inet_reqsk_alloc(&dccp6_request_sock_ops);
+       req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk);
        if (req == NULL)
                goto drop;
 
@@ -403,6 +366,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        ireq = inet_rsk(req);
        ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
        ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+       ireq->ireq_family = AF_INET6;
 
        if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -469,11 +433,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
                memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-               ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
-
-               ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
-
-               newsk->sk_v6_rcv_saddr = newnp->saddr;
+               newnp->saddr = newsk->sk_v6_rcv_saddr;
 
                inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
                newsk->sk_backlog_rcv = dccp_v4_do_rcv;
@@ -591,7 +551,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                dccp_done(newsk);
                goto out;
        }
-       __inet6_hash(newsk, NULL);
+       __inet_hash(newsk, NULL);
 
        return newsk;
 
@@ -916,9 +876,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        sk->sk_backlog_rcv = dccp_v6_do_rcv;
                        goto failure;
                }
-               ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
-               ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &sk->sk_v6_rcv_saddr);
-
+               np->saddr = sk->sk_v6_rcv_saddr;
                return err;
        }
 
@@ -1061,7 +1019,7 @@ static struct proto dccp_v6_prot = {
        .sendmsg           = dccp_sendmsg,
        .recvmsg           = dccp_recvmsg,
        .backlog_rcv       = dccp_v6_do_rcv,
-       .hash              = dccp_v6_hash,
+       .hash              = inet_hash,
        .unhash            = inet_unhash,
        .accept            = inet_csk_accept,
        .get_port          = inet_csk_get_port,
index b50dc436db1fb4639b340bacffe0ae1fc027f8fa..332f7d6d994291c2cd8cded425c8e89965556acf 100644 (file)
@@ -152,8 +152,7 @@ EXPORT_SYMBOL_GPL(dccp_create_openreq_child);
  * as an request_sock.
  */
 struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
-                           struct request_sock *req,
-                           struct request_sock **prev)
+                           struct request_sock *req)
 {
        struct sock *child = NULL;
        struct dccp_request_sock *dreq = dccp_rsk(req);
@@ -200,7 +199,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
        if (child == NULL)
                goto listen_overflow;
 
-       inet_csk_reqsk_queue_unlink(sk, req, prev);
+       inet_csk_reqsk_queue_unlink(sk, req);
        inet_csk_reqsk_queue_removed(sk, req);
        inet_csk_reqsk_queue_add(sk, req, child);
 out:
@@ -212,7 +211,7 @@ drop:
        if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
                req->rsk_ops->send_reset(sk, skb);
 
-       inet_csk_reqsk_queue_drop(sk, req, prev);
+       inet_csk_reqsk_queue_drop(sk, req);
        goto out;
 }
 
index 595ddf0459db79c724b8eab2c8b0f6a0a7d1df97..d8346d0eadebf3018aa0358a98be65a2d111d984 100644 (file)
@@ -72,8 +72,7 @@ static void printl(const char *fmt, ...)
        wake_up(&dccpw.wait);
 }
 
-static int jdccp_sendmsg(struct kiocb *iocb, struct sock *sk,
-                        struct msghdr *msg, size_t size)
+static int jdccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
        const struct inet_sock *inet = inet_sk(sk);
        struct ccid3_hc_tx_sock *hc = NULL;
index e171b780b499dabc7d7312bb3aadbbe4b005f2bd..52a94016526d3c595a71f4affac1a24251f964d7 100644 (file)
@@ -741,8 +741,7 @@ static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
        return 0;
 }
 
-int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                size_t len)
+int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        const struct dccp_sock *dp = dccp_sk(sk);
        const int flags = msg->msg_flags;
@@ -806,8 +805,8 @@ out_discard:
 
 EXPORT_SYMBOL_GPL(dccp_sendmsg);
 
-int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                size_t len, int nonblock, int flags, int *addr_len)
+int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+                int flags, int *addr_len)
 {
        const struct dccp_hdr *dh;
        long timeo;
index 1cd46a345cb04387a50843a251637b6e3cbd7501..3ef7acef3ce8c17f3a2e873b8178b7ee2f7cd619 100644 (file)
@@ -161,33 +161,11 @@ out:
        sock_put(sk);
 }
 
-/*
- *     Timer for listening sockets
- */
-static void dccp_response_timer(struct sock *sk)
-{
-       inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
-                                  DCCP_RTO_MAX);
-}
-
 static void dccp_keepalive_timer(unsigned long data)
 {
        struct sock *sk = (struct sock *)data;
 
-       /* Only process if socket is not in use. */
-       bh_lock_sock(sk);
-       if (sock_owned_by_user(sk)) {
-               /* Try again later. */
-               inet_csk_reset_keepalive_timer(sk, HZ / 20);
-               goto out;
-       }
-
-       if (sk->sk_state == DCCP_LISTEN) {
-               dccp_response_timer(sk);
-               goto out;
-       }
-out:
-       bh_unlock_sock(sk);
+       pr_err("dccp should not use a keepalive timer !\n");
        sock_put(sk);
 }
 
index 810228646de38f9fe26eb2c75a84fbc000840f7b..754484b3cd0e8cc2f92f2800f89dd71762b733d8 100644 (file)
@@ -1669,8 +1669,8 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
 }
 
 
-static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
-       struct msghdr *msg, size_t size, int flags)
+static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                     int flags)
 {
        struct sock *sk = sock->sk;
        struct dn_scp *scp = DN_SK(sk);
@@ -1905,8 +1905,7 @@ static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
        return skb;
 }
 
-static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
-                     struct msghdr *msg, size_t size)
+static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 {
        struct sock *sk = sock->sk;
        struct dn_scp *scp = DN_SK(sk);
index 7ca7c3143da332d567bd336197beff18ce210083..be1f08cdad29135238c59c52f7c6bbc6548d39d2 100644 (file)
 #include <net/dn_route.h>
 
 static int dn_neigh_construct(struct neighbour *);
-static void dn_long_error_report(struct neighbour *, struct sk_buff *);
-static void dn_short_error_report(struct neighbour *, struct sk_buff *);
-static int dn_long_output(struct neighbour *, struct sk_buff *);
-static int dn_short_output(struct neighbour *, struct sk_buff *);
-static int dn_phase3_output(struct neighbour *, struct sk_buff *);
-
-
-/*
- * For talking to broadcast devices: Ethernet & PPP
- */
-static const struct neigh_ops dn_long_ops = {
-       .family =               AF_DECnet,
-       .error_report =         dn_long_error_report,
-       .output =               dn_long_output,
-       .connected_output =     dn_long_output,
-};
+static void dn_neigh_error_report(struct neighbour *, struct sk_buff *);
+static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb);
 
 /*
- * For talking to pointopoint and multidrop devices: DDCMP and X.25
+ * Operations for adding the link layer header.
  */
-static const struct neigh_ops dn_short_ops = {
+static const struct neigh_ops dn_neigh_ops = {
        .family =               AF_DECnet,
-       .error_report =         dn_short_error_report,
-       .output =               dn_short_output,
-       .connected_output =     dn_short_output,
-};
-
-/*
- * For talking to DECnet phase III nodes
- */
-static const struct neigh_ops dn_phase3_ops = {
-       .family =               AF_DECnet,
-       .error_report =         dn_short_error_report, /* Can use short version here */
-       .output =               dn_phase3_output,
-       .connected_output =     dn_phase3_output,
+       .error_report =         dn_neigh_error_report,
+       .output =               dn_neigh_output,
+       .connected_output =     dn_neigh_output,
 };
 
 static u32 dn_neigh_hash(const void *pkey,
@@ -93,11 +69,18 @@ static u32 dn_neigh_hash(const void *pkey,
        return jhash_2words(*(__u16 *)pkey, 0, hash_rnd[0]);
 }
 
+static bool dn_key_eq(const struct neighbour *neigh, const void *pkey)
+{
+       return neigh_key_eq16(neigh, pkey);
+}
+
 struct neigh_table dn_neigh_table = {
        .family =                       PF_DECnet,
        .entry_size =                   NEIGH_ENTRY_SIZE(sizeof(struct dn_neigh)),
        .key_len =                      sizeof(__le16),
+       .protocol =                     cpu_to_be16(ETH_P_DNA_RT),
        .hash =                         dn_neigh_hash,
+       .key_eq =                       dn_key_eq,
        .constructor =                  dn_neigh_construct,
        .id =                           "dn_neigh_cache",
        .parms ={
@@ -146,16 +129,9 @@ static int dn_neigh_construct(struct neighbour *neigh)
 
        __neigh_parms_put(neigh->parms);
        neigh->parms = neigh_parms_clone(parms);
-
-       if (dn_db->use_long)
-               neigh->ops = &dn_long_ops;
-       else
-               neigh->ops = &dn_short_ops;
        rcu_read_unlock();
 
-       if (dn->flags & DN_NDFLAG_P3)
-               neigh->ops = &dn_phase3_ops;
-
+       neigh->ops = &dn_neigh_ops;
        neigh->nud_state = NUD_NOARP;
        neigh->output = neigh->ops->connected_output;
 
@@ -187,24 +163,16 @@ static int dn_neigh_construct(struct neighbour *neigh)
        return 0;
 }
 
-static void dn_long_error_report(struct neighbour *neigh, struct sk_buff *skb)
+static void dn_neigh_error_report(struct neighbour *neigh, struct sk_buff *skb)
 {
-       printk(KERN_DEBUG "dn_long_error_report: called\n");
+       printk(KERN_DEBUG "dn_neigh_error_report: called\n");
        kfree_skb(skb);
 }
 
-
-static void dn_short_error_report(struct neighbour *neigh, struct sk_buff *skb)
-{
-       printk(KERN_DEBUG "dn_short_error_report: called\n");
-       kfree_skb(skb);
-}
-
-static int dn_neigh_output_packet(struct sk_buff *skb)
+static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *)dst;
-       struct neighbour *neigh = rt->n;
        struct net_device *dev = neigh->dev;
        char mac_addr[ETH_ALEN];
        unsigned int seq;
@@ -226,6 +194,18 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
        return err;
 }
 
+static int dn_neigh_output_packet(struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+       struct dn_route *rt = (struct dn_route *)dst;
+       struct neighbour *neigh = rt->n;
+
+       return neigh->output(neigh, skb);
+}
+
+/*
+ * For talking to broadcast devices: Ethernet & PPP
+ */
 static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
 {
        struct net_device *dev = neigh->dev;
@@ -269,6 +249,9 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
                       neigh->dev, dn_neigh_output_packet);
 }
 
+/*
+ * For talking to pointopoint and multidrop devices: DDCMP and X.25
+ */
 static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
 {
        struct net_device *dev = neigh->dev;
@@ -306,7 +289,8 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
 }
 
 /*
- * Phase 3 output is the same is short output, execpt that
+ * For talking to DECnet phase III nodes
+ * Phase 3 output is the same as short output, execpt that
  * it clears the area bits before transmission.
  */
 static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
@@ -344,6 +328,32 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
                       neigh->dev, dn_neigh_output_packet);
 }
 
+int dn_to_neigh_output(struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+       struct dn_route *rt = (struct dn_route *) dst;
+       struct neighbour *neigh = rt->n;
+       struct dn_neigh *dn = (struct dn_neigh *)neigh;
+       struct dn_dev *dn_db;
+       bool use_long;
+
+       rcu_read_lock();
+       dn_db = rcu_dereference(neigh->dev->dn_ptr);
+       if (dn_db == NULL) {
+               rcu_read_unlock();
+               return -EINVAL;
+       }
+       use_long = dn_db->use_long;
+       rcu_read_unlock();
+
+       if (dn->flags & DN_NDFLAG_P3)
+               return dn_phase3_output(neigh, skb);
+       if (use_long)
+               return dn_long_output(neigh, skb);
+       else
+               return dn_short_output(neigh, skb);
+}
+
 /*
  * Unfortunately, the neighbour code uses the device in its hash
  * function, so we don't get any advantage from it. This function
index 3b81092771f8b7beb74349a84819046f7c080df1..9ab0c4ba297f546ab4dd68b63894c14091b9ab3c 100644 (file)
@@ -136,7 +136,6 @@ int decnet_dst_gc_interval = 2;
 
 static struct dst_ops dn_dst_ops = {
        .family =               PF_DECnet,
-       .protocol =             cpu_to_be16(ETH_P_DNA_RT),
        .gc_thresh =            128,
        .gc =                   dn_dst_gc,
        .check =                dn_dst_check,
@@ -743,15 +742,6 @@ out:
        return NET_RX_DROP;
 }
 
-static int dn_to_neigh_output(struct sk_buff *skb)
-{
-       struct dst_entry *dst = skb_dst(skb);
-       struct dn_route *rt = (struct dn_route *) dst;
-       struct neighbour *n = rt->n;
-
-       return n->output(n, skb);
-}
-
 static int dn_output(struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
index faf7cc3483fe0822c26be6b915061ee8fdd8be9a..9d66a0f72f906733878de68e7f2e6bd80932c1b9 100644 (file)
@@ -248,7 +248,9 @@ void __init dn_fib_rules_init(void)
 
 void __exit dn_fib_rules_cleanup(void)
 {
+       rtnl_lock();
        fib_rules_unregister(dn_fib_rules_ops);
+       rtnl_unlock();
        rcu_barrier();
 }
 
index e4d9560a910b0eb96ed3a4ad59d63771f865de3c..af34fc9bdf69768e45e3e772929410fa0eeee41c 100644 (file)
@@ -89,9 +89,7 @@ static void dnrmg_send_peer(struct sk_buff *skb)
 
 static unsigned int dnrmg_hook(const struct nf_hook_ops *ops,
                        struct sk_buff *skb,
-                       const struct net_device *in,
-                       const struct net_device *out,
-                       int (*okfn)(struct sk_buff *))
+                       const struct nf_hook_state *state)
 {
        dnrmg_send_peer(skb);
        return NF_ACCEPT;
index 5f8ac404535bb6143d3fc2e90cacf5b0f0200129..ff7736f7ff42d335b08cef345b3b1309e81b7985 100644 (file)
@@ -5,9 +5,12 @@ config HAVE_NET_DSA
 # Drivers must select NET_DSA and the appropriate tagging format
 
 config NET_DSA
-       tristate
-       depends on HAVE_NET_DSA
+       tristate "Distributed Switch Architecture"
+       depends on HAVE_NET_DSA && NET_SWITCHDEV
        select PHYLIB
+       ---help---
+         Say Y if you want to enable support for the hardware switches supported
+         by the Distributed Switch Architecture.
 
 if NET_DSA
 
index 2173402d87e0f56f255d0b378f06fc51aa2d3fed..5eaadabe23a1dbf6c8dcaa8032fd739d5d7d04c8 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
+#include <linux/of_net.h>
 #include <linux/sysfs.h>
 #include "dsa_priv.h"
 
@@ -175,43 +176,14 @@ __ATTRIBUTE_GROUPS(dsa_hwmon);
 #endif /* CONFIG_NET_DSA_HWMON */
 
 /* basic switch operations **************************************************/
-static struct dsa_switch *
-dsa_switch_setup(struct dsa_switch_tree *dst, int index,
-                struct device *parent, struct device *host_dev)
+static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
 {
-       struct dsa_chip_data *pd = dst->pd->chip + index;
-       struct dsa_switch_driver *drv;
-       struct dsa_switch *ds;
-       int ret;
-       char *name;
-       int i;
+       struct dsa_switch_driver *drv = ds->drv;
+       struct dsa_switch_tree *dst = ds->dst;
+       struct dsa_chip_data *pd = ds->pd;
        bool valid_name_found = false;
-
-       /*
-        * Probe for switch model.
-        */
-       drv = dsa_switch_probe(host_dev, pd->sw_addr, &name);
-       if (drv == NULL) {
-               netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
-                          index);
-               return ERR_PTR(-EINVAL);
-       }
-       netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n",
-                   index, name);
-
-
-       /*
-        * Allocate and initialise switch state.
-        */
-       ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
-       if (ds == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       ds->dst = dst;
-       ds->index = index;
-       ds->pd = dst->pd->chip + index;
-       ds->drv = drv;
-       ds->master_dev = host_dev;
+       int index = ds->index;
+       int i, ret;
 
        /*
         * Validate supplied switch configuration.
@@ -256,7 +228,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
         * switch.
         */
        if (dst->cpu_switch == index) {
-               switch (drv->tag_protocol) {
+               switch (ds->tag_protocol) {
 #ifdef CONFIG_NET_DSA_TAG_DSA
                case DSA_TAG_PROTO_DSA:
                        dst->rcv = dsa_netdev_ops.rcv;
@@ -284,7 +256,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
                        goto out;
                }
 
-               dst->tag_protocol = drv->tag_protocol;
+               dst->tag_protocol = ds->tag_protocol;
        }
 
        /*
@@ -314,19 +286,15 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
         * Create network devices for physical switch ports.
         */
        for (i = 0; i < DSA_MAX_PORTS; i++) {
-               struct net_device *slave_dev;
-
                if (!(ds->phys_port_mask & (1 << i)))
                        continue;
 
-               slave_dev = dsa_slave_create(ds, parent, i, pd->port_names[i]);
-               if (slave_dev == NULL) {
+               ret = dsa_slave_create(ds, parent, i, pd->port_names[i]);
+               if (ret < 0) {
                        netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s)\n",
                                   index, i, pd->port_names[i]);
-                       continue;
+                       ret = 0;
                }
-
-               ds->ports[i] = slave_dev;
        }
 
 #ifdef CONFIG_NET_DSA_HWMON
@@ -354,13 +322,57 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
        }
 #endif /* CONFIG_NET_DSA_HWMON */
 
-       return ds;
+       return ret;
 
 out_free:
        mdiobus_free(ds->slave_mii_bus);
 out:
        kfree(ds);
-       return ERR_PTR(ret);
+       return ret;
+}
+
+static struct dsa_switch *
+dsa_switch_setup(struct dsa_switch_tree *dst, int index,
+                struct device *parent, struct device *host_dev)
+{
+       struct dsa_chip_data *pd = dst->pd->chip + index;
+       struct dsa_switch_driver *drv;
+       struct dsa_switch *ds;
+       int ret;
+       char *name;
+
+       /*
+        * Probe for switch model.
+        */
+       drv = dsa_switch_probe(host_dev, pd->sw_addr, &name);
+       if (drv == NULL) {
+               netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
+                          index);
+               return ERR_PTR(-EINVAL);
+       }
+       netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n",
+                   index, name);
+
+
+       /*
+        * Allocate and initialise switch state.
+        */
+       ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
+       if (ds == NULL)
+               return NULL;
+
+       ds->dst = dst;
+       ds->index = index;
+       ds->pd = pd;
+       ds->drv = drv;
+       ds->tag_protocol = drv->tag_protocol;
+       ds->master_dev = host_dev;
+
+       ret = dsa_switch_setup_one(ds, parent);
+       if (ret)
+               return NULL;
+
+       return ds;
 }
 
 static void dsa_switch_destroy(struct dsa_switch *ds)
@@ -378,7 +390,7 @@ static int dsa_switch_suspend(struct dsa_switch *ds)
 
        /* Suspend slave network devices */
        for (i = 0; i < DSA_MAX_PORTS; i++) {
-               if (!(ds->phys_port_mask & (1 << i)))
+               if (!dsa_is_port_initialized(ds, i))
                        continue;
 
                ret = dsa_slave_suspend(ds->ports[i]);
@@ -404,7 +416,7 @@ static int dsa_switch_resume(struct dsa_switch *ds)
 
        /* Resume slave network devices */
        for (i = 0; i < DSA_MAX_PORTS; i++) {
-               if (!(ds->phys_port_mask & (1 << i)))
+               if (!dsa_is_port_initialized(ds, i))
                        continue;
 
                ret = dsa_slave_resume(ds->ports[i]);
@@ -501,12 +513,10 @@ static struct net_device *dev_to_net_device(struct device *dev)
 #ifdef CONFIG_OF
 static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
                                        struct dsa_chip_data *cd,
-                                       int chip_index,
+                                       int chip_index, int port_index,
                                        struct device_node *link)
 {
-       int ret;
        const __be32 *reg;
-       int link_port_addr;
        int link_sw_addr;
        struct device_node *parent_sw;
        int len;
@@ -519,6 +529,10 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
        if (!reg || (len != sizeof(*reg) * 2))
                return -EINVAL;
 
+       /*
+        * Get the destination switch number from the second field of its 'reg'
+        * property, i.e. for "reg = <0x19 1>" sw_addr is '1'.
+        */
        link_sw_addr = be32_to_cpup(reg + 1);
 
        if (link_sw_addr >= pd->nr_chips)
@@ -535,20 +549,9 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
                memset(cd->rtable, -1, pd->nr_chips * sizeof(s8));
        }
 
-       reg = of_get_property(link, "reg", NULL);
-       if (!reg) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       link_port_addr = be32_to_cpup(reg);
-
-       cd->rtable[link_sw_addr] = link_port_addr;
+       cd->rtable[link_sw_addr] = port_index;
 
        return 0;
-out:
-       kfree(cd->rtable);
-       return ret;
 }
 
 static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
@@ -567,12 +570,12 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
        kfree(pd->chip);
 }
 
-static int dsa_of_probe(struct platform_device *pdev)
+static int dsa_of_probe(struct device *dev)
 {
-       struct device_node *np = pdev->dev.of_node;
+       struct device_node *np = dev->of_node;
        struct device_node *child, *mdio, *ethernet, *port, *link;
        struct mii_bus *mdio_bus;
-       struct platform_device *ethernet_dev;
+       struct net_device *ethernet_dev;
        struct dsa_platform_data *pd;
        struct dsa_chip_data *cd;
        const char *port_name;
@@ -587,22 +590,22 @@ static int dsa_of_probe(struct platform_device *pdev)
 
        mdio_bus = of_mdio_find_bus(mdio);
        if (!mdio_bus)
-               return -EINVAL;
+               return -EPROBE_DEFER;
 
        ethernet = of_parse_phandle(np, "dsa,ethernet", 0);
        if (!ethernet)
                return -EINVAL;
 
-       ethernet_dev = of_find_device_by_node(ethernet);
+       ethernet_dev = of_find_net_device_by_node(ethernet);
        if (!ethernet_dev)
-               return -ENODEV;
+               return -EPROBE_DEFER;
 
        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return -ENOMEM;
 
-       pdev->dev.platform_data = pd;
-       pd->netdev = &ethernet_dev->dev;
+       dev->platform_data = pd;
+       pd->of_netdev = ethernet_dev;
        pd->nr_chips = of_get_available_child_count(np);
        if (pd->nr_chips > DSA_MAX_SWITCHES)
                pd->nr_chips = DSA_MAX_SWITCHES;
@@ -658,7 +661,7 @@ static int dsa_of_probe(struct platform_device *pdev)
                        if (!strcmp(port_name, "dsa") && link &&
                                        pd->nr_chips > 1) {
                                ret = dsa_of_setup_routing_table(pd, cd,
-                                               chip_index, link);
+                                               chip_index, port_index, link);
                                if (ret)
                                        goto out_free_chip;
                        }
@@ -674,72 +677,35 @@ out_free_chip:
        dsa_of_free_platform_data(pd);
 out_free:
        kfree(pd);
-       pdev->dev.platform_data = NULL;
+       dev->platform_data = NULL;
        return ret;
 }
 
-static void dsa_of_remove(struct platform_device *pdev)
+static void dsa_of_remove(struct device *dev)
 {
-       struct dsa_platform_data *pd = pdev->dev.platform_data;
+       struct dsa_platform_data *pd = dev->platform_data;
 
-       if (!pdev->dev.of_node)
+       if (!dev->of_node)
                return;
 
        dsa_of_free_platform_data(pd);
        kfree(pd);
 }
 #else
-static inline int dsa_of_probe(struct platform_device *pdev)
+static inline int dsa_of_probe(struct device *dev)
 {
        return 0;
 }
 
-static inline void dsa_of_remove(struct platform_device *pdev)
+static inline void dsa_of_remove(struct device *dev)
 {
 }
 #endif
 
-static int dsa_probe(struct platform_device *pdev)
+static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
+                         struct device *parent, struct dsa_platform_data *pd)
 {
-       struct dsa_platform_data *pd = pdev->dev.platform_data;
-       struct net_device *dev;
-       struct dsa_switch_tree *dst;
-       int i, ret;
-
-       pr_notice_once("Distributed Switch Architecture driver version %s\n",
-                      dsa_driver_version);
-
-       if (pdev->dev.of_node) {
-               ret = dsa_of_probe(pdev);
-               if (ret)
-                       return ret;
-
-               pd = pdev->dev.platform_data;
-       }
-
-       if (pd == NULL || pd->netdev == NULL)
-               return -EINVAL;
-
-       dev = dev_to_net_device(pd->netdev);
-       if (dev == NULL) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (dev->dsa_ptr != NULL) {
-               dev_put(dev);
-               ret = -EEXIST;
-               goto out;
-       }
-
-       dst = kzalloc(sizeof(*dst), GFP_KERNEL);
-       if (dst == NULL) {
-               dev_put(dev);
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       platform_set_drvdata(pdev, dst);
+       int i;
 
        dst->pd = pd;
        dst->master_netdev = dev;
@@ -749,7 +715,7 @@ static int dsa_probe(struct platform_device *pdev)
        for (i = 0; i < pd->nr_chips; i++) {
                struct dsa_switch *ds;
 
-               ds = dsa_switch_setup(dst, i, &pdev->dev, pd->chip[i].host_dev);
+               ds = dsa_switch_setup(dst, i, parent, pd->chip[i].host_dev);
                if (IS_ERR(ds)) {
                        netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n",
                                   i, PTR_ERR(ds));
@@ -777,18 +743,67 @@ static int dsa_probe(struct platform_device *pdev)
                dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
                add_timer(&dst->link_poll_timer);
        }
+}
+
+static int dsa_probe(struct platform_device *pdev)
+{
+       struct dsa_platform_data *pd = pdev->dev.platform_data;
+       struct net_device *dev;
+       struct dsa_switch_tree *dst;
+       int ret;
+
+       pr_notice_once("Distributed Switch Architecture driver version %s\n",
+                      dsa_driver_version);
+
+       if (pdev->dev.of_node) {
+               ret = dsa_of_probe(&pdev->dev);
+               if (ret)
+                       return ret;
+
+               pd = pdev->dev.platform_data;
+       }
+
+       if (pd == NULL || (pd->netdev == NULL && pd->of_netdev == NULL))
+               return -EINVAL;
+
+       if (pd->of_netdev) {
+               dev = pd->of_netdev;
+               dev_hold(dev);
+       } else {
+               dev = dev_to_net_device(pd->netdev);
+       }
+       if (dev == NULL) {
+               ret = -EPROBE_DEFER;
+               goto out;
+       }
+
+       if (dev->dsa_ptr != NULL) {
+               dev_put(dev);
+               ret = -EEXIST;
+               goto out;
+       }
+
+       dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+       if (dst == NULL) {
+               dev_put(dev);
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       platform_set_drvdata(pdev, dst);
+
+       dsa_setup_dst(dst, dev, &pdev->dev, pd);
 
        return 0;
 
 out:
-       dsa_of_remove(pdev);
+       dsa_of_remove(&pdev->dev);
 
        return ret;
 }
 
-static int dsa_remove(struct platform_device *pdev)
+static void dsa_remove_dst(struct dsa_switch_tree *dst)
 {
-       struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
        int i;
 
        if (dst->link_poll_needed)
@@ -802,8 +817,14 @@ static int dsa_remove(struct platform_device *pdev)
                if (ds != NULL)
                        dsa_switch_destroy(ds);
        }
+}
 
-       dsa_of_remove(pdev);
+static int dsa_remove(struct platform_device *pdev)
+{
+       struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
+
+       dsa_remove_dst(dst);
+       dsa_of_remove(&pdev->dev);
 
        return 0;
 }
@@ -830,6 +851,10 @@ static struct packet_type dsa_pack_type __read_mostly = {
        .func   = dsa_switch_rcv,
 };
 
+static struct notifier_block dsa_netdevice_nb __read_mostly = {
+       .notifier_call  = dsa_slave_netdevice_event,
+};
+
 #ifdef CONFIG_PM_SLEEP
 static int dsa_suspend(struct device *d)
 {
@@ -888,6 +913,8 @@ static int __init dsa_init_module(void)
 {
        int rc;
 
+       register_netdevice_notifier(&dsa_netdevice_nb);
+
        rc = platform_driver_register(&dsa_driver);
        if (rc)
                return rc;
@@ -900,6 +927,7 @@ module_init(dsa_init_module);
 
 static void __exit dsa_cleanup_module(void)
 {
+       unregister_netdevice_notifier(&dsa_netdevice_nb);
        dev_remove_pack(&dsa_pack_type);
        platform_driver_unregister(&dsa_driver);
 }
index dc9756d3154c7c81a1f8bd2b3124de36f0a97963..d5f1f9b862ea5f4794ba2fab277e19aed8e51d2a 100644 (file)
@@ -45,6 +45,8 @@ struct dsa_slave_priv {
        int                     old_link;
        int                     old_pause;
        int                     old_duplex;
+
+       struct net_device       *bridge_dev;
 };
 
 /* dsa.c */
@@ -53,11 +55,12 @@ extern char dsa_driver_version[];
 /* slave.c */
 extern const struct dsa_device_ops notag_netdev_ops;
 void dsa_slave_mii_bus_init(struct dsa_switch *ds);
-struct net_device *dsa_slave_create(struct dsa_switch *ds,
-                                   struct device *parent,
-                                   int port, char *name);
+int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
+                    int port, char *name);
 int dsa_slave_suspend(struct net_device *slave_dev);
 int dsa_slave_resume(struct net_device *slave_dev);
+int dsa_slave_netdevice_event(struct notifier_block *unused,
+                             unsigned long event, void *ptr);
 
 /* tag_dsa.c */
 extern const struct dsa_device_ops dsa_netdev_ops;
index f23deadf42a070a251a7fc0c8f7b4d0a1e767dc5..827cda560a552b7b0dca45d49a06816e6dda513b 100644 (file)
 
 #include <linux/list.h>
 #include <linux/etherdevice.h>
+#include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
 #include <linux/of_net.h>
 #include <linux/of_mdio.h>
+#include <net/rtnetlink.h>
+#include <net/switchdev.h>
+#include <linux/if_bridge.h>
 #include "dsa_priv.h"
 
 /* slave mii_bus handling ***************************************************/
@@ -51,13 +55,16 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
 
 
 /* slave device handling ****************************************************/
-static int dsa_slave_init(struct net_device *dev)
+static int dsa_slave_get_iflink(const struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
 
-       dev->iflink = p->parent->dst->master_netdev->ifindex;
+       return p->parent->dst->master_netdev->ifindex;
+}
 
-       return 0;
+static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p)
+{
+       return !!p->bridge_dev;
 }
 
 static int dsa_slave_open(struct net_device *dev)
@@ -65,6 +72,8 @@ static int dsa_slave_open(struct net_device *dev)
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct net_device *master = p->parent->dst->master_netdev;
        struct dsa_switch *ds = p->parent;
+       u8 stp_state = dsa_port_is_bridged(p) ?
+                       BR_STATE_BLOCKING : BR_STATE_FORWARDING;
        int err;
 
        if (!(master->flags & IFF_UP))
@@ -93,6 +102,9 @@ static int dsa_slave_open(struct net_device *dev)
                        goto clear_promisc;
        }
 
+       if (ds->drv->port_stp_update)
+               ds->drv->port_stp_update(ds, p->port, stp_state);
+
        if (p->phy)
                phy_start(p->phy);
 
@@ -133,6 +145,9 @@ static int dsa_slave_close(struct net_device *dev)
        if (ds->drv->port_disable)
                ds->drv->port_disable(ds, p->port, p->phy);
 
+       if (ds->drv->port_stp_update)
+               ds->drv->port_stp_update(ds, p->port, BR_STATE_DISABLED);
+
        return 0;
 }
 
@@ -184,6 +199,105 @@ out:
        return 0;
 }
 
+static int dsa_slave_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                            struct net_device *dev,
+                            const unsigned char *addr, u16 vid, u16 nlm_flags)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       int ret = -EOPNOTSUPP;
+
+       if (ds->drv->fdb_add)
+               ret = ds->drv->fdb_add(ds, p->port, addr, vid);
+
+       return ret;
+}
+
+static int dsa_slave_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+                            struct net_device *dev,
+                            const unsigned char *addr, u16 vid)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       int ret = -EOPNOTSUPP;
+
+       if (ds->drv->fdb_del)
+               ret = ds->drv->fdb_del(ds, p->port, addr, vid);
+
+       return ret;
+}
+
+static int dsa_slave_fill_info(struct net_device *dev, struct sk_buff *skb,
+                              const unsigned char *addr, u16 vid,
+                              bool is_static,
+                              u32 portid, u32 seq, int type,
+                              unsigned int flags)
+{
+       struct nlmsghdr *nlh;
+       struct ndmsg *ndm;
+
+       nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       ndm = nlmsg_data(nlh);
+       ndm->ndm_family  = AF_BRIDGE;
+       ndm->ndm_pad1    = 0;
+       ndm->ndm_pad2    = 0;
+       ndm->ndm_flags   = NTF_EXT_LEARNED;
+       ndm->ndm_type    = 0;
+       ndm->ndm_ifindex = dev->ifindex;
+       ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
+
+       if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
+               goto nla_put_failure;
+
+       if (vid && nla_put_u16(skb, NDA_VLAN, vid))
+               goto nla_put_failure;
+
+       nlmsg_end(skb, nlh);
+       return 0;
+
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+/* Dump information about entries, in response to GETNEIGH */
+static int dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+                             struct net_device *dev,
+                             struct net_device *filter_dev, int idx)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       unsigned char addr[ETH_ALEN] = { 0 };
+       int ret;
+
+       if (!ds->drv->fdb_getnext)
+               return -EOPNOTSUPP;
+
+       for (; ; idx++) {
+               bool is_static;
+
+               ret = ds->drv->fdb_getnext(ds, p->port, addr, &is_static);
+               if (ret < 0)
+                       break;
+
+               if (idx < cb->args[0])
+                       continue;
+
+               ret = dsa_slave_fill_info(dev, skb, addr, 0,
+                                         is_static,
+                                         NETLINK_CB(cb->skb).portid,
+                                         cb->nlh->nlmsg_seq,
+                                         RTM_NEWNEIGH, NLM_F_MULTI);
+               if (ret < 0)
+                       break;
+       }
+
+       return idx;
+}
+
 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
@@ -194,6 +308,92 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return -EOPNOTSUPP;
 }
 
+/* Return a bitmask of all ports being currently bridged within a given bridge
+ * device. Note that on leave, the mask will still return the bitmask of ports
+ * currently bridged, prior to port removal, and this is exactly what we want.
+ */
+static u32 dsa_slave_br_port_mask(struct dsa_switch *ds,
+                                 struct net_device *bridge)
+{
+       struct dsa_slave_priv *p;
+       unsigned int port;
+       u32 mask = 0;
+
+       for (port = 0; port < DSA_MAX_PORTS; port++) {
+               if (!dsa_is_port_initialized(ds, port))
+                       continue;
+
+               p = netdev_priv(ds->ports[port]);
+
+               if (ds->ports[port]->priv_flags & IFF_BRIDGE_PORT &&
+                   p->bridge_dev == bridge)
+                       mask |= 1 << port;
+       }
+
+       return mask;
+}
+
+static int dsa_slave_stp_update(struct net_device *dev, u8 state)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       int ret = -EOPNOTSUPP;
+
+       if (ds->drv->port_stp_update)
+               ret = ds->drv->port_stp_update(ds, p->port, state);
+
+       return ret;
+}
+
+static int dsa_slave_bridge_port_join(struct net_device *dev,
+                                     struct net_device *br)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       int ret = -EOPNOTSUPP;
+
+       p->bridge_dev = br;
+
+       if (ds->drv->port_join_bridge)
+               ret = ds->drv->port_join_bridge(ds, p->port,
+                                               dsa_slave_br_port_mask(ds, br));
+
+       return ret;
+}
+
+static int dsa_slave_bridge_port_leave(struct net_device *dev)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       int ret = -EOPNOTSUPP;
+
+
+       if (ds->drv->port_leave_bridge)
+               ret = ds->drv->port_leave_bridge(ds, p->port,
+                                                dsa_slave_br_port_mask(ds, p->bridge_dev));
+
+       p->bridge_dev = NULL;
+
+       /* The port has left the bridge; the bridge layer has put it in
+        * BR_STATE_DISABLED, so set it back to BR_STATE_FORWARDING to keep
+        * it functional as a standalone interface.
+        */
+       dsa_slave_stp_update(dev, BR_STATE_FORWARDING);
+
+       return ret;
+}
+
+static int dsa_slave_parent_id_get(struct net_device *dev,
+                                  struct netdev_phys_item_id *psid)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+
+       psid->id_len = sizeof(ds->index);
+       memcpy(&psid->id, &ds->index, psid->id_len);
+
+       return 0;
+}
+
 static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
@@ -462,14 +662,22 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
 };
 
 static const struct net_device_ops dsa_slave_netdev_ops = {
-       .ndo_init               = dsa_slave_init,
        .ndo_open               = dsa_slave_open,
        .ndo_stop               = dsa_slave_close,
        .ndo_start_xmit         = dsa_slave_xmit,
        .ndo_change_rx_flags    = dsa_slave_change_rx_flags,
        .ndo_set_rx_mode        = dsa_slave_set_rx_mode,
        .ndo_set_mac_address    = dsa_slave_set_mac_address,
+       .ndo_fdb_add            = dsa_slave_fdb_add,
+       .ndo_fdb_del            = dsa_slave_fdb_del,
+       .ndo_fdb_dump           = dsa_slave_fdb_dump,
        .ndo_do_ioctl           = dsa_slave_ioctl,
+       .ndo_get_iflink         = dsa_slave_get_iflink,
+};
+
+static const struct swdev_ops dsa_slave_swdev_ops = {
+       .swdev_parent_id_get = dsa_slave_parent_id_get,
+       .swdev_port_stp_update = dsa_slave_stp_update,
 };
 
 static void dsa_slave_adjust_link(struct net_device *dev)
@@ -513,6 +721,24 @@ static int dsa_slave_fixed_link_update(struct net_device *dev,
 }
 
 /* slave device setup *******************************************************/
+static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
+                                struct net_device *slave_dev,
+                                int addr)
+{
+       struct dsa_switch *ds = p->parent;
+
+       p->phy = ds->slave_mii_bus->phy_map[addr];
+       if (!p->phy)
+               return -ENODEV;
+
+       /* Use already configured phy mode */
+       p->phy_interface = p->phy->interface;
+       phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+                          p->phy_interface);
+
+       return 0;
+}
+
 static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
                                struct net_device *slave_dev)
 {
@@ -546,10 +772,25 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
        if (ds->drv->get_phy_flags)
                phy_flags = ds->drv->get_phy_flags(ds, p->port);
 
-       if (phy_dn)
-               p->phy = of_phy_connect(slave_dev, phy_dn,
-                                       dsa_slave_adjust_link, phy_flags,
-                                       p->phy_interface);
+       if (phy_dn) {
+               ret = of_mdio_parse_addr(&slave_dev->dev, phy_dn);
+               /* If this PHY address is part of phys_mii_mask, which means
+                * that we need to divert reads and writes to/from it, then we
+                * want to bind this device using the slave MII bus created by
+                * DSA to make that happen.
+                */
+               if (!phy_is_fixed && ret >= 0 &&
+                   (ds->phys_mii_mask & (1 << ret))) {
+                       ret = dsa_slave_phy_connect(p, slave_dev, ret);
+                       if (ret)
+                               return ret;
+               } else {
+                       p->phy = of_phy_connect(slave_dev, phy_dn,
+                                               dsa_slave_adjust_link,
+                                               phy_flags,
+                                               p->phy_interface);
+               }
+       }
 
        if (p->phy && phy_is_fixed)
                fixed_phy_set_link_update(p->phy, dsa_slave_fixed_link_update);
@@ -558,14 +799,9 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
         * MDIO bus instead
         */
        if (!p->phy) {
-               p->phy = ds->slave_mii_bus->phy_map[p->port];
-               if (!p->phy)
-                       return -ENODEV;
-
-               /* Use already configured phy mode */
-               p->phy_interface = p->phy->interface;
-               phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
-                                  p->phy_interface);
+               ret = dsa_slave_phy_connect(p, slave_dev, p->port);
+               if (ret)
+                       return ret;
        } else {
                netdev_info(slave_dev, "attached PHY at address %d [%s]\n",
                            p->phy->addr, p->phy->drv->name);
@@ -605,9 +841,8 @@ int dsa_slave_resume(struct net_device *slave_dev)
        return 0;
 }
 
-struct net_device *
-dsa_slave_create(struct dsa_switch *ds, struct device *parent,
-                int port, char *name)
+int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
+                    int port, char *name)
 {
        struct net_device *master = ds->dst->master_netdev;
        struct net_device *slave_dev;
@@ -617,13 +852,14 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
        slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv), name,
                                 NET_NAME_UNKNOWN, ether_setup);
        if (slave_dev == NULL)
-               return slave_dev;
+               return -ENOMEM;
 
        slave_dev->features = master->vlan_features;
        slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
        eth_hw_addr_inherit(slave_dev, master);
        slave_dev->tx_queue_len = 0;
        slave_dev->netdev_ops = &dsa_slave_netdev_ops;
+       slave_dev->swdev_ops = &dsa_slave_swdev_ops;
 
        SET_NETDEV_DEV(slave_dev, parent);
        slave_dev->dev.of_node = ds->pd->port_dn[port];
@@ -667,19 +903,64 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
        ret = dsa_slave_phy_setup(p, slave_dev);
        if (ret) {
                free_netdev(slave_dev);
-               return NULL;
+               return ret;
        }
 
+       ds->ports[port] = slave_dev;
        ret = register_netdev(slave_dev);
        if (ret) {
                netdev_err(master, "error %d registering interface %s\n",
                           ret, slave_dev->name);
                phy_disconnect(p->phy);
+               ds->ports[port] = NULL;
                free_netdev(slave_dev);
-               return NULL;
+               return ret;
        }
 
        netif_carrier_off(slave_dev);
 
-       return slave_dev;
+       return 0;
+}
+
+static bool dsa_slave_dev_check(struct net_device *dev)
+{
+       return dev->netdev_ops == &dsa_slave_netdev_ops;
+}
+
+static int dsa_slave_master_changed(struct net_device *dev)
+{
+       struct net_device *master = netdev_master_upper_dev_get(dev);
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       int err = 0;
+
+       if (master && master->rtnl_link_ops &&
+           !strcmp(master->rtnl_link_ops->kind, "bridge"))
+               err = dsa_slave_bridge_port_join(dev, master);
+       else if (dsa_port_is_bridged(p))
+               err = dsa_slave_bridge_port_leave(dev);
+
+       return err;
+}
+
+int dsa_slave_netdevice_event(struct notifier_block *unused,
+                             unsigned long event, void *ptr)
+{
+       struct net_device *dev;
+       int err = 0;
+
+       switch (event) {
+       case NETDEV_CHANGEUPPER:
+               dev = netdev_notifier_info_to_dev(ptr);
+               if (!dsa_slave_dev_check(dev))
+                       goto out;
+
+               err = dsa_slave_master_changed(dev);
+               if (err)
+                       netdev_warn(dev, "failed to reflect master change\n");
+
+               break;
+       }
+
+out:
+       return NOTIFY_DONE;
 }
index 238f38d21641fb16071caacd90f15dcb07815a25..f3bad41d725f449f91d0b1b4f7119a9c9660e976 100644 (file)
@@ -104,7 +104,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
         */
 
        if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
-               memset(eth->h_dest, 0, ETH_ALEN);
+               eth_zero_addr(eth->h_dest);
                return ETH_HLEN;
        }
 
@@ -112,39 +112,6 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
 }
 EXPORT_SYMBOL(eth_header);
 
-/**
- * eth_rebuild_header- rebuild the Ethernet MAC header.
- * @skb: socket buffer to update
- *
- * This is called after an ARP or IPV6 ndisc it's resolution on this
- * sk_buff. We now let protocol (ARP) fill in the other fields.
- *
- * This routine CANNOT use cached dst->neigh!
- * Really, it is used only when dst->neigh is wrong.
- */
-int eth_rebuild_header(struct sk_buff *skb)
-{
-       struct ethhdr *eth = (struct ethhdr *)skb->data;
-       struct net_device *dev = skb->dev;
-
-       switch (eth->h_proto) {
-#ifdef CONFIG_INET
-       case htons(ETH_P_IP):
-               return arp_find(eth->h_dest, skb);
-#endif
-       default:
-               netdev_dbg(dev,
-                      "%s: unable to resolve type %X addresses.\n",
-                      dev->name, ntohs(eth->h_proto));
-
-               memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
-               break;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(eth_rebuild_header);
-
 /**
  * eth_get_headlen - determine the length of header for an ethernet frame
  * @data: pointer to start of frame
@@ -369,7 +336,6 @@ EXPORT_SYMBOL(eth_validate_addr);
 const struct header_ops eth_header_ops ____cacheline_aligned = {
        .create         = eth_header,
        .parse          = eth_header_parse,
-       .rebuild        = eth_rebuild_header,
        .cache          = eth_header_cache,
        .cache_update   = eth_header_cache_update,
 };
@@ -391,7 +357,7 @@ void ether_setup(struct net_device *dev)
        dev->flags              = IFF_BROADCAST|IFF_MULTICAST;
        dev->priv_flags         |= IFF_TX_SKB_SHARING;
 
-       memset(dev->broadcast, 0xFF, ETH_ALEN);
+       eth_broadcast_addr(dev->broadcast);
 
 }
 EXPORT_SYMBOL(ether_setup);
index 055fbb71ba6f3fae7ff3740566c97306d97ebe3b..0ae5822ef944fb0e5c74b22de0fe8d426032135c 100644 (file)
@@ -113,7 +113,7 @@ static void lowpan_setup(struct net_device *dev)
 {
        dev->addr_len           = IEEE802154_ADDR_LEN;
        memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
-       dev->type               = ARPHRD_IEEE802154;
+       dev->type               = ARPHRD_6LOWPAN;
        /* Frame Control + Sequence Number + Address fields + Security Header */
        dev->hard_header_len    = 2 + 1 + 20 + 14;
        dev->needed_tailroom    = 2; /* FCS */
@@ -126,6 +126,7 @@ static void lowpan_setup(struct net_device *dev)
        dev->header_ops         = &lowpan_header_ops;
        dev->ml_priv            = &lowpan_mlme;
        dev->destructor         = free_netdev;
+       dev->features           |= NETIF_F_NETNS_LOCAL;
 }
 
 static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -148,10 +149,11 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
 
        pr_debug("adding new link\n");
 
-       if (!tb[IFLA_LINK])
+       if (!tb[IFLA_LINK] ||
+           !net_eq(dev_net(dev), &init_net))
                return -EINVAL;
        /* find and hold real wpan device */
-       real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+       real_dev = dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
        if (!real_dev)
                return -ENODEV;
        if (real_dev->type != ARPHRD_IEEE802154) {
index 18bc7e7385074c4b62e5e7c87c1d2bba3981e7a4..2ee00e8a03082aaebce7f017a5ca1e7219c6aade 100644 (file)
@@ -25,6 +25,9 @@
 #include "sysfs.h"
 #include "core.h"
 
+/* name for sysfs, %d is appended */
+#define PHY_NAME "phy"
+
 /* RCU-protected (and RTNL for writers) */
 LIST_HEAD(cfg802154_rdev_list);
 int cfg802154_rdev_list_generation;
@@ -122,7 +125,7 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
 
        INIT_LIST_HEAD(&rdev->wpan_dev_list);
        device_initialize(&rdev->wpan_phy.dev);
-       dev_set_name(&rdev->wpan_phy.dev, "wpan-phy%d", rdev->wpan_phy_idx);
+       dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx);
 
        rdev->wpan_phy.dev.class = &wpan_phy_class;
        rdev->wpan_phy.dev.platform_data = rdev;
@@ -225,6 +228,7 @@ static int cfg802154_netdev_notifier_call(struct notifier_block *nb,
        switch (state) {
                /* TODO NETDEV_DEVTYPE */
        case NETDEV_REGISTER:
+               dev->features |= NETIF_F_NETNS_LOCAL;
                wpan_dev->identifier = ++rdev->wpan_dev_id;
                list_add_rcu(&wpan_dev->list, &rdev->wpan_dev_list);
                rdev->devlist_generation++;
index 9105265920fe735db77650607bd3fe89d143869a..2b4955d7aae54b9b4e647e6b7de9b8fa7ef68c42 100644 (file)
@@ -76,7 +76,6 @@ nla_put_failure:
        nlmsg_free(msg);
        return -ENOBUFS;
 }
-EXPORT_SYMBOL(ieee802154_nl_start_confirm);
 
 static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
                                    u32 seq, int flags, struct net_device *dev)
index 2878d8ca6d3bf12c8cb0c382b3e9db68911510f8..b60c65f70346a48623209dc93fb06273d445fb7b 100644 (file)
@@ -98,12 +98,12 @@ static int ieee802154_sock_release(struct socket *sock)
        return 0;
 }
 
-static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-                                  struct msghdr *msg, size_t len)
+static int ieee802154_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+                                  size_t len)
 {
        struct sock *sk = sock->sk;
 
-       return sk->sk_prot->sendmsg(iocb, sk, msg, len);
+       return sk->sk_prot->sendmsg(sk, msg, len);
 }
 
 static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
@@ -255,8 +255,7 @@ static int raw_disconnect(struct sock *sk, int flags)
        return 0;
 }
 
-static int raw_sendmsg(struct kiocb *iocb, struct sock *sk,
-                      struct msghdr *msg, size_t size)
+static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
        struct net_device *dev;
        unsigned int mtu;
@@ -327,8 +326,8 @@ out:
        return err;
 }
 
-static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t len, int noblock, int flags, int *addr_len)
+static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+                      int noblock, int flags, int *addr_len)
 {
        size_t copied = 0;
        int err = -EOPNOTSUPP;
@@ -615,8 +614,7 @@ static int dgram_disconnect(struct sock *sk, int flags)
        return 0;
 }
 
-static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
-                        struct msghdr *msg, size_t size)
+static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
        struct net_device *dev;
        unsigned int mtu;
@@ -715,9 +713,8 @@ out:
        return err;
 }
 
-static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
-                        struct msghdr *msg, size_t len, int noblock,
-                        int flags, int *addr_len)
+static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+                        int noblock, int flags, int *addr_len)
 {
        size_t copied = 0;
        int err = -EOPNOTSUPP;
index dff55c2d87f34fb8304f7b19d321bc3a08813ee7..133b4280660cfc2f9b651a56a95502991e21840b 100644 (file)
@@ -48,49 +48,6 @@ static ssize_t name_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(name);
 
-#define MASTER_SHOW_COMPLEX(name, format_string, args...)              \
-static ssize_t name ## _show(struct device *dev,                       \
-                           struct device_attribute *attr, char *buf)   \
-{                                                                      \
-       struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \
-       int ret;                                                        \
-                                                                       \
-       mutex_lock(&phy->pib_lock);                                     \
-       ret = snprintf(buf, PAGE_SIZE, format_string "\n", args);       \
-       mutex_unlock(&phy->pib_lock);                                   \
-       return ret;                                                     \
-}                                                                      \
-static DEVICE_ATTR_RO(name)
-
-#define MASTER_SHOW(field, format_string)                              \
-       MASTER_SHOW_COMPLEX(field, format_string, phy->field)
-
-MASTER_SHOW(current_channel, "%d");
-MASTER_SHOW(current_page, "%d");
-MASTER_SHOW(transmit_power, "%d +- 1 dB");
-MASTER_SHOW_COMPLEX(cca_mode, "%d", phy->cca.mode);
-
-static ssize_t channels_supported_show(struct device *dev,
-                                      struct device_attribute *attr,
-                                      char *buf)
-{
-       struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
-       int ret;
-       int i, len = 0;
-
-       mutex_lock(&phy->pib_lock);
-       for (i = 0; i < 32; i++) {
-               ret = snprintf(buf + len, PAGE_SIZE - len,
-                              "%#09x\n", phy->channels_supported[i]);
-               if (ret < 0)
-                       break;
-               len += ret;
-       }
-       mutex_unlock(&phy->pib_lock);
-       return len;
-}
-static DEVICE_ATTR_RO(channels_supported);
-
 static void wpan_phy_release(struct device *dev)
 {
        struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
@@ -101,12 +58,6 @@ static void wpan_phy_release(struct device *dev)
 static struct attribute *pmib_attrs[] = {
        &dev_attr_index.attr,
        &dev_attr_name.attr,
-       /* below will be removed soon */
-       &dev_attr_current_channel.attr,
-       &dev_attr_current_page.attr,
-       &dev_attr_channels_supported.attr,
-       &dev_attr_transmit_power.attr,
-       &dev_attr_cca_mode.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(pmib);
index d2e49baaff63420320486d310f5c8f7d0d54bcc2..8b47a4d79d040e39e592d3583affb7fec2d19f3d 100644 (file)
@@ -217,7 +217,7 @@ int inet_listen(struct socket *sock, int backlog)
                 * shutdown() (rather than close()).
                 */
                if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
-                   inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
+                   !inet_csk(sk)->icsk_accept_queue.fastopenq) {
                        if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
                                err = fastopen_init_queue(sk, backlog);
                        else if ((sysctl_tcp_fastopen &
@@ -314,11 +314,11 @@ lookup_protocol:
        answer_flags = answer->flags;
        rcu_read_unlock();
 
-       WARN_ON(answer_prot->slab == NULL);
+       WARN_ON(!answer_prot->slab);
 
        err = -ENOBUFS;
        sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
-       if (sk == NULL)
+       if (!sk)
                goto out;
 
        err = 0;
@@ -716,8 +716,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
 }
 EXPORT_SYMBOL(inet_getname);
 
-int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-                size_t size)
+int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 {
        struct sock *sk = sock->sk;
 
@@ -728,7 +727,7 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
            inet_autobind(sk))
                return -EAGAIN;
 
-       return sk->sk_prot->sendmsg(iocb, sk, msg, size);
+       return sk->sk_prot->sendmsg(sk, msg, size);
 }
 EXPORT_SYMBOL(inet_sendmsg);
 
@@ -750,8 +749,8 @@ ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
 }
 EXPORT_SYMBOL(inet_sendpage);
 
-int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-                size_t size, int flags)
+int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                int flags)
 {
        struct sock *sk = sock->sk;
        int addr_len = 0;
@@ -759,7 +758,7 @@ int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 
        sock_rps_record_flow(sk);
 
-       err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+       err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
                                   flags & ~MSG_DONTWAIT, &addr_len);
        if (err >= 0)
                msg->msg_namelen = addr_len;
@@ -1270,7 +1269,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                if (udpfrag) {
                        iph->id = htons(id);
                        iph->frag_off = htons(offset >> 3);
-                       if (skb->next != NULL)
+                       if (skb->next)
                                iph->frag_off |= htons(IP_MF);
                        offset += skb->len - nhoff - ihl;
                } else {
@@ -1675,7 +1674,7 @@ static int __init inet_init(void)
        struct list_head *r;
        int rc = -EINVAL;
 
-       BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
+       sock_skb_cb_check_size(sizeof(struct inet_skb_parm));
 
        rc = proto_register(&tcp_prot, 1);
        if (rc)
index 205e1472aa7819784091d588818e1ab602f6006f..c6e67aa46c32aa78eb7fe9172d90628112e61739 100644 (file)
  *     Interface to generic neighbour cache.
  */
 static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd);
+static bool arp_key_eq(const struct neighbour *n, const void *pkey);
 static int arp_constructor(struct neighbour *neigh);
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -149,18 +150,12 @@ static const struct neigh_ops arp_direct_ops = {
        .connected_output =     neigh_direct_output,
 };
 
-static const struct neigh_ops arp_broken_ops = {
-       .family =               AF_INET,
-       .solicit =              arp_solicit,
-       .error_report =         arp_error_report,
-       .output =               neigh_compat_output,
-       .connected_output =     neigh_compat_output,
-};
-
 struct neigh_table arp_tbl = {
        .family         = AF_INET,
        .key_len        = 4,
+       .protocol       = cpu_to_be16(ETH_P_IP),
        .hash           = arp_hash,
+       .key_eq         = arp_key_eq,
        .constructor    = arp_constructor,
        .proxy_redo     = parp_redo,
        .id             = "arp_cache",
@@ -216,7 +211,12 @@ static u32 arp_hash(const void *pkey,
                    const struct net_device *dev,
                    __u32 *hash_rnd)
 {
-       return arp_hashfn(*(u32 *)pkey, dev, *hash_rnd);
+       return arp_hashfn(pkey, dev, hash_rnd);
+}
+
+static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
+{
+       return neigh_key_eq32(neigh, pkey);
 }
 
 static int arp_constructor(struct neighbour *neigh)
@@ -228,7 +228,7 @@ static int arp_constructor(struct neighbour *neigh)
 
        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
-       if (in_dev == NULL) {
+       if (!in_dev) {
                rcu_read_unlock();
                return -EINVAL;
        }
@@ -260,35 +260,6 @@ static int arp_constructor(struct neighbour *neigh)
                   in old paradigm.
                 */
 
-#if 1
-               /* So... these "amateur" devices are hopeless.
-                  The only thing, that I can say now:
-                  It is very sad that we need to keep ugly obsolete
-                  code to make them happy.
-
-                  They should be moved to more reasonable state, now
-                  they use rebuild_header INSTEAD OF hard_start_xmit!!!
-                  Besides that, they are sort of out of date
-                  (a lot of redundant clones/copies, useless in 2.1),
-                  I wonder why people believe that they work.
-                */
-               switch (dev->type) {
-               default:
-                       break;
-               case ARPHRD_ROSE:
-#if IS_ENABLED(CONFIG_AX25)
-               case ARPHRD_AX25:
-#if IS_ENABLED(CONFIG_NETROM)
-               case ARPHRD_NETROM:
-#endif
-                       neigh->ops = &arp_broken_ops;
-                       neigh->output = neigh->ops->output;
-                       return 0;
-#else
-                       break;
-#endif
-               }
-#endif
                if (neigh->type == RTN_MULTICAST) {
                        neigh->nud_state = NUD_NOARP;
                        arp_mc_map(addr, neigh->ha, dev, 1);
@@ -433,71 +404,6 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
        return flag;
 }
 
-/* OBSOLETE FUNCTIONS */
-
-/*
- *     Find an arp mapping in the cache. If not found, post a request.
- *
- *     It is very UGLY routine: it DOES NOT use skb->dst->neighbour,
- *     even if it exists. It is supposed that skb->dev was mangled
- *     by a virtual device (eql, shaper). Nobody but broken devices
- *     is allowed to use this function, it is scheduled to be removed. --ANK
- */
-
-static int arp_set_predefined(int addr_hint, unsigned char *haddr,
-                             __be32 paddr, struct net_device *dev)
-{
-       switch (addr_hint) {
-       case RTN_LOCAL:
-               pr_debug("arp called for own IP address\n");
-               memcpy(haddr, dev->dev_addr, dev->addr_len);
-               return 1;
-       case RTN_MULTICAST:
-               arp_mc_map(paddr, haddr, dev, 1);
-               return 1;
-       case RTN_BROADCAST:
-               memcpy(haddr, dev->broadcast, dev->addr_len);
-               return 1;
-       }
-       return 0;
-}
-
-
-int arp_find(unsigned char *haddr, struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-       __be32 paddr;
-       struct neighbour *n;
-
-       if (!skb_dst(skb)) {
-               pr_debug("arp_find is called with dst==NULL\n");
-               kfree_skb(skb);
-               return 1;
-       }
-
-       paddr = rt_nexthop(skb_rtable(skb), ip_hdr(skb)->daddr);
-       if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr,
-                              paddr, dev))
-               return 0;
-
-       n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);
-
-       if (n) {
-               n->used = jiffies;
-               if (n->nud_state & NUD_VALID || neigh_event_send(n, skb) == 0) {
-                       neigh_ha_snapshot(haddr, n, dev);
-                       neigh_release(n);
-                       return 0;
-               }
-               neigh_release(n);
-       } else
-               kfree_skb(skb);
-       return 1;
-}
-EXPORT_SYMBOL(arp_find);
-
-/* END OF OBSOLETE FUNCTIONS */
-
 /*
  * Check if we can use proxy ARP for this path
  */
@@ -569,7 +475,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev,
  */
 
 /*
- *     Create an arp packet. If (dest_hw == NULL), we create a broadcast
+ *     Create an arp packet. If dest_hw is not set, we create a broadcast
  *     message.
  */
 struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
@@ -589,7 +495,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
         */
 
        skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                return NULL;
 
        skb_reserve(skb, hlen);
@@ -597,9 +503,9 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
        arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
        skb->dev = dev;
        skb->protocol = htons(ETH_P_ARP);
-       if (src_hw == NULL)
+       if (!src_hw)
                src_hw = dev->dev_addr;
-       if (dest_hw == NULL)
+       if (!dest_hw)
                dest_hw = dev->broadcast;
 
        /*
@@ -663,7 +569,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
                break;
 #endif
        default:
-               if (target_hw != NULL)
+               if (target_hw)
                        memcpy(arp_ptr, target_hw, dev->addr_len);
                else
                        memset(arp_ptr, 0, dev->addr_len);
@@ -708,7 +614,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
 
        skb = arp_create(type, ptype, dest_ip, dev, src_ip,
                         dest_hw, src_hw, target_hw);
-       if (skb == NULL)
+       if (!skb)
                return;
 
        arp_xmit(skb);
@@ -738,7 +644,7 @@ static int arp_process(struct sk_buff *skb)
         * is ARP'able.
         */
 
-       if (in_dev == NULL)
+       if (!in_dev)
                goto out;
 
        arp = arp_hdr(skb);
@@ -902,7 +808,7 @@ static int arp_process(struct sk_buff *skb)
                is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
                          inet_addr_type(net, sip) == RTN_UNICAST;
 
-               if (n == NULL &&
+               if (!n &&
                    ((arp->ar_op == htons(ARPOP_REPLY)  &&
                      inet_addr_type(net, sip) == RTN_UNICAST) || is_garp))
                        n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
@@ -994,7 +900,7 @@ out_of_mem:
 
 static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
 {
-       if (dev == NULL) {
+       if (!dev) {
                IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
                return 0;
        }
@@ -1020,7 +926,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
                        return -ENODEV;
        }
        if (mask) {
-               if (pneigh_lookup(&arp_tbl, net, &ip, dev, 1) == NULL)
+               if (!pneigh_lookup(&arp_tbl, net, &ip, dev, 1))
                        return -ENOBUFS;
                return 0;
        }
@@ -1041,7 +947,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
        ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
        if (r->arp_flags & ATF_PERM)
                r->arp_flags |= ATF_COM;
-       if (dev == NULL) {
+       if (!dev) {
                struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
 
                if (IS_ERR(rt))
@@ -1161,7 +1067,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
                return arp_req_delete_public(net, r, dev);
 
        ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
-       if (dev == NULL) {
+       if (!dev) {
                struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
                if (IS_ERR(rt))
                        return PTR_ERR(rt);
@@ -1210,7 +1116,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
        if (r.arp_dev[0]) {
                err = -ENODEV;
                dev = __dev_get_by_name(net, r.arp_dev);
-               if (dev == NULL)
+               if (!dev)
                        goto out;
 
                /* Mmmm... It is wrong... ARPHRD_NETROM==0 */
index e361ea6f3fc8ce0d2e0814109079037a26573d1d..bdb2a07ec363b709197435ac602b74377a600780 100644 (file)
@@ -255,7 +255,7 @@ static int __init cipso_v4_cache_init(void)
        cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
                                 sizeof(struct cipso_v4_map_cache_bkt),
                                 GFP_KERNEL);
-       if (cipso_v4_cache == NULL)
+       if (!cipso_v4_cache)
                return -ENOMEM;
 
        for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
@@ -339,7 +339,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
                        secattr->cache = entry->lsm_data;
                        secattr->flags |= NETLBL_SECATTR_CACHE;
                        secattr->type = NETLBL_NLTYPE_CIPSOV4;
-                       if (prev_entry == NULL) {
+                       if (!prev_entry) {
                                spin_unlock_bh(&cipso_v4_cache[bkt].lock);
                                return 0;
                        }
@@ -393,10 +393,10 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr,
        cipso_ptr_len = cipso_ptr[1];
 
        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
-       if (entry == NULL)
+       if (!entry)
                return -ENOMEM;
        entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
-       if (entry->key == NULL) {
+       if (!entry->key) {
                ret_val = -ENOMEM;
                goto cache_add_failure;
        }
@@ -502,7 +502,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
        atomic_set(&doi_def->refcount, 1);
 
        spin_lock(&cipso_v4_doi_list_lock);
-       if (cipso_v4_doi_search(doi_def->doi) != NULL) {
+       if (cipso_v4_doi_search(doi_def->doi)) {
                spin_unlock(&cipso_v4_doi_list_lock);
                ret_val = -EEXIST;
                goto doi_add_return;
@@ -513,7 +513,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
 
 doi_add_return:
        audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
-       if (audit_buf != NULL) {
+       if (audit_buf) {
                const char *type_str;
                switch (doi_type) {
                case CIPSO_V4_MAP_TRANS:
@@ -547,7 +547,7 @@ doi_add_return:
  */
 void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
 {
-       if (doi_def == NULL)
+       if (!doi_def)
                return;
 
        switch (doi_def->type) {
@@ -598,7 +598,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
 
        spin_lock(&cipso_v4_doi_list_lock);
        doi_def = cipso_v4_doi_search(doi);
-       if (doi_def == NULL) {
+       if (!doi_def) {
                spin_unlock(&cipso_v4_doi_list_lock);
                ret_val = -ENOENT;
                goto doi_remove_return;
@@ -617,7 +617,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
 
 doi_remove_return:
        audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
-       if (audit_buf != NULL) {
+       if (audit_buf) {
                audit_log_format(audit_buf,
                                 " cipso_doi=%u res=%u",
                                 doi, ret_val == 0 ? 1 : 0);
@@ -644,7 +644,7 @@ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)
 
        rcu_read_lock();
        doi_def = cipso_v4_doi_search(doi);
-       if (doi_def == NULL)
+       if (!doi_def)
                goto doi_getdef_return;
        if (!atomic_inc_not_zero(&doi_def->refcount))
                doi_def = NULL;
@@ -664,7 +664,7 @@ doi_getdef_return:
  */
 void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
 {
-       if (doi_def == NULL)
+       if (!doi_def)
                return;
 
        if (!atomic_dec_and_test(&doi_def->refcount))
@@ -1642,7 +1642,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
 
        rcu_read_lock();
        doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
-       if (doi_def == NULL) {
+       if (!doi_def) {
                err_offset = 2;
                goto validate_return_locked;
        }
@@ -1736,7 +1736,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
                         * not the loopback device drop the packet. Further,
                         * there is no legitimate reason for setting this from
                         * userspace so reject it if skb is NULL. */
-                       if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
+                       if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) {
                                err_offset = opt_iter;
                                goto validate_return_locked;
                        }
@@ -1897,7 +1897,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
         * defined yet but it is not a problem as the only users of these
         * "lite" PF_INET sockets are functions which do an accept() call
         * afterwards so we will label the socket as part of the accept(). */
-       if (sk == NULL)
+       if (!sk)
                return 0;
 
        /* We allocate the maximum CIPSO option size here so we are probably
@@ -1905,7 +1905,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
         * on and after all we are only talking about 40 bytes. */
        buf_len = CIPSO_V4_OPT_LEN_MAX;
        buf = kmalloc(buf_len, GFP_ATOMIC);
-       if (buf == NULL) {
+       if (!buf) {
                ret_val = -ENOMEM;
                goto socket_setattr_failure;
        }
@@ -1921,7 +1921,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
         * set the IPOPT_CIPSO option. */
        opt_len = (buf_len + 3) & ~3;
        opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
-       if (opt == NULL) {
+       if (!opt) {
                ret_val = -ENOMEM;
                goto socket_setattr_failure;
        }
@@ -1981,7 +1981,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
         * on and after all we are only talking about 40 bytes. */
        buf_len = CIPSO_V4_OPT_LEN_MAX;
        buf = kmalloc(buf_len, GFP_ATOMIC);
-       if (buf == NULL) {
+       if (!buf) {
                ret_val = -ENOMEM;
                goto req_setattr_failure;
        }
@@ -1997,7 +1997,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
         * set the IPOPT_CIPSO option. */
        opt_len = (buf_len + 3) & ~3;
        opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
-       if (opt == NULL) {
+       if (!opt) {
                ret_val = -ENOMEM;
                goto req_setattr_failure;
        }
@@ -2102,7 +2102,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
 
        sk_inet = inet_sk(sk);
        opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
-       if (opt == NULL || opt->opt.cipso == 0)
+       if (!opt || opt->opt.cipso == 0)
                return;
 
        hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
@@ -2128,7 +2128,7 @@ void cipso_v4_req_delattr(struct request_sock *req)
 
        req_inet = inet_rsk(req);
        opt = req_inet->opt;
-       if (opt == NULL || opt->opt.cipso == 0)
+       if (!opt || opt->opt.cipso == 0)
                return;
 
        cipso_v4_delopt(&req_inet->opt);
@@ -2157,7 +2157,7 @@ int cipso_v4_getattr(const unsigned char *cipso,
        doi = get_unaligned_be32(&cipso[2]);
        rcu_read_lock();
        doi_def = cipso_v4_doi_search(doi);
-       if (doi_def == NULL)
+       if (!doi_def)
                goto getattr_return;
        /* XXX - This code assumes only one tag per CIPSO option which isn't
         * really a good assumption to make but since we only support the MAC
index 3a8985c94581823b1dc8c81279cc1c1fe59d999b..419d23c53ec756327178f9101ea8287d671c9a47 100644 (file)
@@ -107,7 +107,7 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
 
 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
 
-static u32 inet_addr_hash(struct net *net, __be32 addr)
+static u32 inet_addr_hash(const struct net *net, __be32 addr)
 {
        u32 val = (__force u32) addr ^ net_hash_mix(net);
 
@@ -548,6 +548,26 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
        return NULL;
 }
 
+static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
+{
+       struct ip_mreqn mreq = {
+               .imr_multiaddr.s_addr = ifa->ifa_address,
+               .imr_ifindex = ifa->ifa_dev->dev->ifindex,
+       };
+       int ret;
+
+       ASSERT_RTNL();
+
+       lock_sock(sk);
+       if (join)
+               ret = ip_mc_join_group(sk, &mreq);
+       else
+               ret = ip_mc_leave_group(sk, &mreq);
+       release_sock(sk);
+
+       return ret;
+}
+
 static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(skb->sk);
@@ -565,7 +585,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        ifm = nlmsg_data(nlh);
        in_dev = inetdev_by_index(net, ifm->ifa_index);
-       if (in_dev == NULL) {
+       if (!in_dev) {
                err = -ENODEV;
                goto errout;
        }
@@ -573,7 +593,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
        for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
             ifap = &ifa->ifa_next) {
                if (tb[IFA_LOCAL] &&
-                   ifa->ifa_local != nla_get_be32(tb[IFA_LOCAL]))
+                   ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
                        continue;
 
                if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
@@ -581,9 +601,11 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
 
                if (tb[IFA_ADDRESS] &&
                    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
-                   !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa)))
+                   !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
                        continue;
 
+               if (ipv4_is_multicast(ifa->ifa_address))
+                       ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
                __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
                return 0;
        }
@@ -733,21 +755,21 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
 
        ifm = nlmsg_data(nlh);
        err = -EINVAL;
-       if (ifm->ifa_prefixlen > 32 || tb[IFA_LOCAL] == NULL)
+       if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
                goto errout;
 
        dev = __dev_get_by_index(net, ifm->ifa_index);
        err = -ENODEV;
-       if (dev == NULL)
+       if (!dev)
                goto errout;
 
        in_dev = __in_dev_get_rtnl(dev);
        err = -ENOBUFS;
-       if (in_dev == NULL)
+       if (!in_dev)
                goto errout;
 
        ifa = inet_alloc_ifa();
-       if (ifa == NULL)
+       if (!ifa)
                /*
                 * A potential indev allocation can be left alive, it stays
                 * assigned to its device and is destroy with it.
@@ -758,7 +780,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
        neigh_parms_data_state_setall(in_dev->arp_parms);
        in_dev_hold(in_dev);
 
-       if (tb[IFA_ADDRESS] == NULL)
+       if (!tb[IFA_ADDRESS])
                tb[IFA_ADDRESS] = tb[IFA_LOCAL];
 
        INIT_HLIST_NODE(&ifa->hash);
@@ -769,11 +791,11 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
        ifa->ifa_scope = ifm->ifa_scope;
        ifa->ifa_dev = in_dev;
 
-       ifa->ifa_local = nla_get_be32(tb[IFA_LOCAL]);
-       ifa->ifa_address = nla_get_be32(tb[IFA_ADDRESS]);
+       ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
+       ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
 
        if (tb[IFA_BROADCAST])
-               ifa->ifa_broadcast = nla_get_be32(tb[IFA_BROADCAST]);
+               ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
 
        if (tb[IFA_LABEL])
                nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
@@ -838,6 +860,15 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
                 * userspace already relies on not having to provide this.
                 */
                set_ifa_lifetime(ifa, valid_lft, prefered_lft);
+               if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
+                       int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
+                                              true, ifa);
+
+                       if (ret < 0) {
+                               inet_free_ifa(ifa);
+                               return ret;
+                       }
+               }
                return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
        } else {
                inet_free_ifa(ifa);
@@ -1259,7 +1290,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
        __be32 addr = 0;
        struct net_device *dev;
 
-       if (in_dev != NULL)
+       if (in_dev)
                return confirm_addr_indev(in_dev, dst, local, scope);
 
        rcu_read_lock();
@@ -1309,7 +1340,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
                if (named++ == 0)
                        goto skip;
                dot = strchr(old, ':');
-               if (dot == NULL) {
+               if (!dot) {
                        sprintf(old, ":%d", named);
                        dot = old;
                }
@@ -1478,7 +1509,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
        u32 preferred, valid;
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        ifm = nlmsg_data(nlh);
@@ -1510,11 +1541,11 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
                valid = INFINITY_LIFE_TIME;
        }
        if ((ifa->ifa_address &&
-            nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) ||
+            nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
            (ifa->ifa_local &&
-            nla_put_be32(skb, IFA_LOCAL, ifa->ifa_local)) ||
+            nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
            (ifa->ifa_broadcast &&
-            nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
+            nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
            (ifa->ifa_label[0] &&
             nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
            nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
@@ -1597,7 +1628,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
 
        net = dev_net(ifa->ifa_dev->dev);
        skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
@@ -1634,7 +1665,7 @@ static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
                return -ENODATA;
 
        nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
-       if (nla == NULL)
+       if (!nla)
                return -EMSGSIZE;
 
        for (i = 0; i < IPV4_DEVCONF_MAX; i++)
@@ -1723,7 +1754,7 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
                        flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        ncm = nlmsg_data(nlh);
@@ -1765,7 +1796,7 @@ void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
        int err = -ENOBUFS;
 
        skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
@@ -1822,10 +1853,10 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
                break;
        default:
                dev = __dev_get_by_index(net, ifindex);
-               if (dev == NULL)
+               if (!dev)
                        goto errout;
                in_dev = __in_dev_get_rtnl(dev);
-               if (in_dev == NULL)
+               if (!in_dev)
                        goto errout;
                devconf = &in_dev->cnf;
                break;
@@ -1833,7 +1864,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
 
        err = -ENOBUFS;
        skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = inet_netconf_fill_devconf(skb, ifindex, devconf,
@@ -2184,7 +2215,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
 {
        struct devinet_sysctl_table *t = cnf->sysctl;
 
-       if (t == NULL)
+       if (!t)
                return;
 
        cnf->sysctl = NULL;
@@ -2245,16 +2276,16 @@ static __net_init int devinet_init_net(struct net *net)
 
        if (!net_eq(net, &init_net)) {
                all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
-               if (all == NULL)
+               if (!all)
                        goto err_alloc_all;
 
                dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
-               if (dflt == NULL)
+               if (!dflt)
                        goto err_alloc_dflt;
 
 #ifdef CONFIG_SYSCTL
                tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
-               if (tbl == NULL)
+               if (!tbl)
                        goto err_alloc_ctl;
 
                tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
@@ -2274,7 +2305,7 @@ static __net_init int devinet_init_net(struct net *net)
 
        err = -ENOMEM;
        forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
-       if (forw_hdr == NULL)
+       if (!forw_hdr)
                goto err_reg_ctl;
        net->ipv4.forw_hdr = forw_hdr;
 #endif
index 60173d4d3a0e335a91d2e9a463eeef6f6b88adfe..421a80b09b62358dad5a0fa35d99db73d28472a7 100644 (file)
@@ -553,7 +553,7 @@ static int esp_init_authenc(struct xfrm_state *x)
        int err;
 
        err = -EINVAL;
-       if (x->ealg == NULL)
+       if (!x->ealg)
                goto error;
 
        err = -ENAMETOOLONG;
index 57be71dd6a9e0163dceefd564bf71036c12dc9ba..872494e6e6eb7996185a99a8b05915f861a73ec4 100644 (file)
@@ -52,12 +52,12 @@ static int __net_init fib4_rules_init(struct net *net)
 {
        struct fib_table *local_table, *main_table;
 
-       local_table = fib_trie_table(RT_TABLE_LOCAL);
-       if (local_table == NULL)
+       main_table  = fib_trie_table(RT_TABLE_MAIN, NULL);
+       if (!main_table)
                return -ENOMEM;
 
-       main_table  = fib_trie_table(RT_TABLE_MAIN);
-       if (main_table == NULL)
+       local_table = fib_trie_table(RT_TABLE_LOCAL, main_table);
+       if (!local_table)
                goto fail;
 
        hlist_add_head_rcu(&local_table->tb_hlist,
@@ -67,14 +67,14 @@ static int __net_init fib4_rules_init(struct net *net)
        return 0;
 
 fail:
-       fib_free_table(local_table);
+       fib_free_table(main_table);
        return -ENOMEM;
 }
 #else
 
 struct fib_table *fib_new_table(struct net *net, u32 id)
 {
-       struct fib_table *tb;
+       struct fib_table *tb, *alias = NULL;
        unsigned int h;
 
        if (id == 0)
@@ -83,23 +83,23 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
        if (tb)
                return tb;
 
-       tb = fib_trie_table(id);
+       if (id == RT_TABLE_LOCAL)
+               alias = fib_new_table(net, RT_TABLE_MAIN);
+
+       tb = fib_trie_table(id, alias);
        if (!tb)
                return NULL;
 
        switch (id) {
        case RT_TABLE_LOCAL:
-               net->ipv4.fib_local = tb;
+               rcu_assign_pointer(net->ipv4.fib_local, tb);
                break;
-
        case RT_TABLE_MAIN:
-               net->ipv4.fib_main = tb;
+               rcu_assign_pointer(net->ipv4.fib_main, tb);
                break;
-
        case RT_TABLE_DEFAULT:
-               net->ipv4.fib_default = tb;
+               rcu_assign_pointer(net->ipv4.fib_default, tb);
                break;
-
        default:
                break;
        }
@@ -129,16 +129,62 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
 }
 #endif /* CONFIG_IP_MULTIPLE_TABLES */
 
+static void fib_replace_table(struct net *net, struct fib_table *old,
+                             struct fib_table *new)
+{
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+       switch (new->tb_id) {
+       case RT_TABLE_LOCAL:
+               rcu_assign_pointer(net->ipv4.fib_local, new);
+               break;
+       case RT_TABLE_MAIN:
+               rcu_assign_pointer(net->ipv4.fib_main, new);
+               break;
+       case RT_TABLE_DEFAULT:
+               rcu_assign_pointer(net->ipv4.fib_default, new);
+               break;
+       default:
+               break;
+       }
+
+#endif
+       /* replace the old table in the hlist */
+       hlist_replace_rcu(&old->tb_hlist, &new->tb_hlist);
+}
+
+int fib_unmerge(struct net *net)
+{
+       struct fib_table *old, *new;
+
+       /* attempt to fetch local table if it has been allocated */
+       old = fib_get_table(net, RT_TABLE_LOCAL);
+       if (!old)
+               return 0;
+
+       new = fib_trie_unmerge(old);
+       if (!new)
+               return -ENOMEM;
+
+       /* replace merged table with clean table */
+       if (new != old) {
+               fib_replace_table(net, old, new);
+               fib_free_table(old);
+       }
+
+       return 0;
+}
+
 static void fib_flush(struct net *net)
 {
        int flushed = 0;
-       struct fib_table *tb;
-       struct hlist_head *head;
        unsigned int h;
 
        for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
-               head = &net->ipv4.fib_table_hash[h];
-               hlist_for_each_entry(tb, head, tb_hlist)
+               struct hlist_head *head = &net->ipv4.fib_table_hash[h];
+               struct hlist_node *tmp;
+               struct fib_table *tb;
+
+               hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
                        flushed += fib_table_flush(tb);
        }
 
@@ -146,6 +192,19 @@ static void fib_flush(struct net *net)
                rt_cache_flush(net);
 }
 
+void fib_flush_external(struct net *net)
+{
+       struct fib_table *tb;
+       struct hlist_head *head;
+       unsigned int h;
+
+       for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
+               head = &net->ipv4.fib_table_hash[h];
+               hlist_for_each_entry(tb, head, tb_hlist)
+                       fib_table_flush_external(tb);
+       }
+}
+
 /*
  * Find address type as if only "dev" was present in the system. If
  * on_dev is NULL then all interfaces are taken into consideration.
@@ -427,7 +486,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
                        for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
                                if (strcmp(ifa->ifa_label, devname) == 0)
                                        break;
-                       if (ifa == NULL)
+                       if (!ifa)
                                return -ENODEV;
                        cfg->fc_prefsrc = ifa->ifa_local;
                }
@@ -455,7 +514,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
                int len = 0;
 
                mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
-               if (mx == NULL)
+               if (!mx)
                        return -ENOMEM;
 
                if (rt->rt_flags & RTF_MTU)
@@ -617,7 +676,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
                goto errout;
 
        tb = fib_get_table(net, cfg.fc_table);
-       if (tb == NULL) {
+       if (!tb) {
                err = -ESRCH;
                goto errout;
        }
@@ -639,7 +698,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
                goto errout;
 
        tb = fib_new_table(net, cfg.fc_table);
-       if (tb == NULL) {
+       if (!tb) {
                err = -ENOBUFS;
                goto errout;
        }
@@ -665,10 +724,12 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        s_h = cb->args[0];
        s_e = cb->args[1];
 
+       rcu_read_lock();
+
        for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
                e = 0;
                head = &net->ipv4.fib_table_hash[h];
-               hlist_for_each_entry(tb, head, tb_hlist) {
+               hlist_for_each_entry_rcu(tb, head, tb_hlist) {
                        if (e < s_e)
                                goto next;
                        if (dumped)
@@ -682,6 +743,8 @@ next:
                }
        }
 out:
+       rcu_read_unlock();
+
        cb->args[1] = e;
        cb->args[0] = h;
 
@@ -716,7 +779,7 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad
        else
                tb = fib_new_table(net, RT_TABLE_LOCAL);
 
-       if (tb == NULL)
+       if (!tb)
                return;
 
        cfg.fc_table = tb->tb_id;
@@ -743,7 +806,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
 
        if (ifa->ifa_flags & IFA_F_SECONDARY) {
                prim = inet_ifa_byprefix(in_dev, prefix, mask);
-               if (prim == NULL) {
+               if (!prim) {
                        pr_warn("%s: bug: prim == NULL\n", __func__);
                        return;
                }
@@ -797,7 +860,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
 
        if (ifa->ifa_flags & IFA_F_SECONDARY) {
                prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
-               if (prim == NULL) {
+               if (!prim) {
                        pr_warn("%s: bug: prim == NULL\n", __func__);
                        return;
                }
@@ -967,7 +1030,7 @@ static void nl_fib_input(struct sk_buff *skb)
                return;
 
        skb = netlink_skb_clone(skb, GFP_KERNEL);
-       if (skb == NULL)
+       if (!skb)
                return;
        nlh = nlmsg_hdr(skb);
 
@@ -988,7 +1051,7 @@ static int __net_init nl_fib_lookup_init(struct net *net)
        };
 
        sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
-       if (sk == NULL)
+       if (!sk)
                return -EAFNOSUPPORT;
        net->ipv4.fibnl = sk;
        return 0;
@@ -1026,7 +1089,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
        case NETDEV_DOWN:
                fib_del_ifaddr(ifa, NULL);
                atomic_inc(&net->ipv4.dev_addr_genid);
-               if (ifa->ifa_dev->ifa_list == NULL) {
+               if (!ifa->ifa_dev->ifa_list) {
                        /* Last address was deleted from this interface.
                         * Disable IP.
                         */
@@ -1094,7 +1157,7 @@ static int __net_init ip_fib_net_init(struct net *net)
        size = max_t(size_t, size, L1_CACHE_BYTES);
 
        net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
-       if (net->ipv4.fib_table_hash == NULL)
+       if (!net->ipv4.fib_table_hash)
                return -ENOMEM;
 
        err = fib4_rules_init(net);
@@ -1111,23 +1174,27 @@ static void ip_fib_net_exit(struct net *net)
 {
        unsigned int i;
 
+       rtnl_lock();
 #ifdef CONFIG_IP_MULTIPLE_TABLES
-       fib4_rules_exit(net);
+       RCU_INIT_POINTER(net->ipv4.fib_local, NULL);
+       RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
+       RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
 #endif
-
-       rtnl_lock();
        for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
-               struct fib_table *tb;
-               struct hlist_head *head;
+               struct hlist_head *head = &net->ipv4.fib_table_hash[i];
                struct hlist_node *tmp;
+               struct fib_table *tb;
 
-               head = &net->ipv4.fib_table_hash[i];
                hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
                        hlist_del(&tb->tb_hlist);
                        fib_table_flush(tb);
                        fib_free_table(tb);
                }
        }
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+       fib4_rules_exit(net);
+#endif
        rtnl_unlock();
        kfree(net->ipv4.fib_table_hash);
 }
index 825981b1049a6c17ec065c000452cdf98e41bacf..c6211ed60b03be1940a1954c08adb8a265f4e124 100644 (file)
@@ -6,11 +6,13 @@
 #include <net/ip_fib.h>
 
 struct fib_alias {
-       struct list_head        fa_list;
+       struct hlist_node       fa_list;
        struct fib_info         *fa_info;
        u8                      fa_tos;
        u8                      fa_type;
        u8                      fa_state;
+       u8                      fa_slen;
+       u32                     tb_id;
        struct rcu_head         rcu;
 };
 
index d3db718be51d17282becc0864d050adfcc77522f..56151982f74efb26dab4abad429f473ba8b06cba 100644 (file)
@@ -153,7 +153,7 @@ static struct fib_table *fib_empty_table(struct net *net)
        u32 id;
 
        for (id = 1; id <= RT_TABLE_MAX; id++)
-               if (fib_get_table(net, id) == NULL)
+               if (!fib_get_table(net, id))
                        return fib_new_table(net, id);
        return NULL;
 }
@@ -174,12 +174,17 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
        if (frh->tos & ~IPTOS_TOS_MASK)
                goto errout;
 
+       /* split local/main if they are not already split */
+       err = fib_unmerge(net);
+       if (err)
+               goto errout;
+
        if (rule->table == RT_TABLE_UNSPEC) {
                if (rule->action == FR_ACT_TO_TBL) {
                        struct fib_table *table;
 
                        table = fib_empty_table(net);
-                       if (table == NULL) {
+                       if (!table) {
                                err = -ENOBUFS;
                                goto errout;
                        }
@@ -189,10 +194,10 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
        }
 
        if (frh->src_len)
-               rule4->src = nla_get_be32(tb[FRA_SRC]);
+               rule4->src = nla_get_in_addr(tb[FRA_SRC]);
 
        if (frh->dst_len)
-               rule4->dst = nla_get_be32(tb[FRA_DST]);
+               rule4->dst = nla_get_in_addr(tb[FRA_DST]);
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
        if (tb[FRA_FLOW]) {
@@ -209,21 +214,31 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
        rule4->tos = frh->tos;
 
        net->ipv4.fib_has_custom_rules = true;
+       fib_flush_external(rule->fr_net);
+
        err = 0;
 errout:
        return err;
 }
 
-static void fib4_rule_delete(struct fib_rule *rule)
+static int fib4_rule_delete(struct fib_rule *rule)
 {
        struct net *net = rule->fr_net;
-#ifdef CONFIG_IP_ROUTE_CLASSID
-       struct fib4_rule *rule4 = (struct fib4_rule *) rule;
+       int err;
 
-       if (rule4->tclassid)
+       /* split local/main if they are not already split */
+       err = fib_unmerge(net);
+       if (err)
+               goto errout;
+
+#ifdef CONFIG_IP_ROUTE_CLASSID
+       if (((struct fib4_rule *)rule)->tclassid)
                net->ipv4.fib_num_tclassid_users--;
 #endif
        net->ipv4.fib_has_custom_rules = true;
+       fib_flush_external(rule->fr_net);
+errout:
+       return err;
 }
 
 static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
@@ -245,10 +260,10 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
                return 0;
 #endif
 
-       if (frh->src_len && (rule4->src != nla_get_be32(tb[FRA_SRC])))
+       if (frh->src_len && (rule4->src != nla_get_in_addr(tb[FRA_SRC])))
                return 0;
 
-       if (frh->dst_len && (rule4->dst != nla_get_be32(tb[FRA_DST])))
+       if (frh->dst_len && (rule4->dst != nla_get_in_addr(tb[FRA_DST])))
                return 0;
 
        return 1;
@@ -264,9 +279,9 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
        frh->tos = rule4->tos;
 
        if ((rule4->dst_len &&
-            nla_put_be32(skb, FRA_DST, rule4->dst)) ||
+            nla_put_in_addr(skb, FRA_DST, rule4->dst)) ||
            (rule4->src_len &&
-            nla_put_be32(skb, FRA_SRC, rule4->src)))
+            nla_put_in_addr(skb, FRA_SRC, rule4->src)))
                goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
        if (rule4->tclassid &&
index 1e2090ea663e209b739e01186b3939802630d63f..8d695b6659c715f89e06c31d9890532b34b2727f 100644 (file)
@@ -213,7 +213,6 @@ static void free_fib_info_rcu(struct rcu_head *head)
                rt_fibinfo_free(&nexthop_nh->nh_rth_input);
        } endfor_nexthops(fi);
 
-       release_net(fi->fib_net);
        if (fi->fib_metrics != (u32 *) dst_default_metrics)
                kfree(fi->fib_metrics);
        kfree(fi);
@@ -391,7 +390,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
        int err = -ENOBUFS;
 
        skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = fib_dump_info(skb, info->portid, seq, event, tb_id,
@@ -469,7 +468,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                        struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
 
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
-                       nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
+                       nexthop_nh->nh_gw = nla ? nla_get_in_addr(nla) : 0;
 #ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
                        nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
@@ -504,7 +503,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
        }
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-       if (cfg->fc_mp == NULL)
+       if (!cfg->fc_mp)
                return 0;
 
        rtnh = cfg->fc_mp;
@@ -524,7 +523,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
                        struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
 
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
-                       if (nla && nla_get_be32(nla) != nh->nh_gw)
+                       if (nla && nla_get_in_addr(nla) != nh->nh_gw)
                                return 1;
 #ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
@@ -647,7 +646,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                rcu_read_lock();
                err = -ENODEV;
                in_dev = inetdev_by_index(net, nh->nh_oif);
-               if (in_dev == NULL)
+               if (!in_dev)
                        goto out;
                err = -ENETDOWN;
                if (!(in_dev->dev->flags & IFF_UP))
@@ -804,7 +803,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        }
 
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
-       if (fi == NULL)
+       if (!fi)
                goto failure;
        fib_info_cnt++;
        if (cfg->fc_mx) {
@@ -814,7 +813,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        } else
                fi->fib_metrics = (u32 *) dst_default_metrics;
 
-       fi->fib_net = hold_net(net);
+       fi->fib_net = net;
        fi->fib_protocol = cfg->fc_protocol;
        fi->fib_scope = cfg->fc_scope;
        fi->fib_flags = cfg->fc_flags;
@@ -922,7 +921,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                nh->nh_scope = RT_SCOPE_NOWHERE;
                nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
                err = -ENODEV;
-               if (nh->nh_dev == NULL)
+               if (!nh->nh_dev)
                        goto failure;
        } else {
                change_nexthops(fi) {
@@ -996,7 +995,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
        struct rtmsg *rtm;
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        rtm = nlmsg_data(nlh);
@@ -1016,7 +1015,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
        rtm->rtm_protocol = fi->fib_protocol;
 
        if (rtm->rtm_dst_len &&
-           nla_put_be32(skb, RTA_DST, dst))
+           nla_put_in_addr(skb, RTA_DST, dst))
                goto nla_put_failure;
        if (fi->fib_priority &&
            nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
@@ -1025,11 +1024,11 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                goto nla_put_failure;
 
        if (fi->fib_prefsrc &&
-           nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
+           nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
                goto nla_put_failure;
        if (fi->fib_nhs == 1) {
                if (fi->fib_nh->nh_gw &&
-                   nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
+                   nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
                        goto nla_put_failure;
                if (fi->fib_nh->nh_oif &&
                    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
@@ -1046,12 +1045,12 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                struct nlattr *mp;
 
                mp = nla_nest_start(skb, RTA_MULTIPATH);
-               if (mp == NULL)
+               if (!mp)
                        goto nla_put_failure;
 
                for_nexthops(fi) {
                        rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
-                       if (rtnh == NULL)
+                       if (!rtnh)
                                goto nla_put_failure;
 
                        rtnh->rtnh_flags = nh->nh_flags & 0xFF;
@@ -1059,7 +1058,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                        rtnh->rtnh_ifindex = nh->nh_oif;
 
                        if (nh->nh_gw &&
-                           nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
+                           nla_put_in_addr(skb, RTA_GATEWAY, nh->nh_gw))
                                goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
                        if (nh->nh_tclassid &&
@@ -1094,7 +1093,7 @@ int fib_sync_down_addr(struct net *net, __be32 local)
        struct hlist_head *head = &fib_info_laddrhash[hash];
        struct fib_info *fi;
 
-       if (fib_info_laddrhash == NULL || local == 0)
+       if (!fib_info_laddrhash || local == 0)
                return 0;
 
        hlist_for_each_entry(fi, head, fib_lhash) {
@@ -1163,12 +1162,12 @@ int fib_sync_down_dev(struct net_device *dev, int force)
 void fib_select_default(struct fib_result *res)
 {
        struct fib_info *fi = NULL, *last_resort = NULL;
-       struct list_head *fa_head = res->fa_head;
+       struct hlist_head *fa_head = res->fa_head;
        struct fib_table *tb = res->table;
        int order = -1, last_idx = -1;
        struct fib_alias *fa;
 
-       list_for_each_entry_rcu(fa, fa_head, fa_list) {
+       hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
                struct fib_info *next_fi = fa->fa_info;
 
                if (next_fi->fib_scope != res->scope ||
@@ -1183,7 +1182,7 @@ void fib_select_default(struct fib_result *res)
 
                fib_alias_accessed(fa);
 
-               if (fi == NULL) {
+               if (!fi) {
                        if (next_fi != res->fi)
                                break;
                } else if (!fib_detect_death(fi, order, &last_resort,
@@ -1196,7 +1195,7 @@ void fib_select_default(struct fib_result *res)
                order++;
        }
 
-       if (order <= 0 || fi == NULL) {
+       if (order <= 0 || !fi) {
                tb->tb_default = -1;
                goto out;
        }
@@ -1252,7 +1251,7 @@ int fib_sync_up(struct net_device *dev)
                                alive++;
                                continue;
                        }
-                       if (nexthop_nh->nh_dev == NULL ||
+                       if (!nexthop_nh->nh_dev ||
                            !(nexthop_nh->nh_dev->flags & IFF_UP))
                                continue;
                        if (nexthop_nh->nh_dev != dev ||
index 3daf0224ff2e1821ce9c258a48db1c3d20437657..e13fcc602da20ee44dfd505ab1115bbcc0e13375 100644 (file)
@@ -79,6 +79,7 @@
 #include <net/tcp.h>
 #include <net/sock.h>
 #include <net/ip_fib.h>
+#include <net/switchdev.h>
 #include "fib_lookup.h"
 
 #define MAX_STAT_DEPTH 32
 
 typedef unsigned int t_key;
 
-#define IS_TNODE(n) ((n)->bits)
-#define IS_LEAF(n) (!(n)->bits)
+#define IS_TRIE(n)     ((n)->pos >= KEYLENGTH)
+#define IS_TNODE(n)    ((n)->bits)
+#define IS_LEAF(n)     (!(n)->bits)
 
-#define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)
-
-struct tnode {
+struct key_vector {
        t_key key;
-       unsigned char bits;             /* 2log(KEYLENGTH) bits needed */
        unsigned char pos;              /* 2log(KEYLENGTH) bits needed */
+       unsigned char bits;             /* 2log(KEYLENGTH) bits needed */
        unsigned char slen;
-       struct tnode __rcu *parent;
-       struct rcu_head rcu;
        union {
-               /* The fields in this struct are valid if bits > 0 (TNODE) */
-               struct {
-                       t_key empty_children; /* KEYLENGTH bits needed */
-                       t_key full_children;  /* KEYLENGTH bits needed */
-                       struct tnode __rcu *child[0];
-               };
-               /* This list pointer if valid if bits == 0 (LEAF) */
-               struct hlist_head list;
+               /* This list pointer if valid if (pos | bits) == 0 (LEAF) */
+               struct hlist_head leaf;
+               /* This array is valid if (pos | bits) > 0 (TNODE) */
+               struct key_vector __rcu *tnode[0];
        };
 };
 
-struct leaf_info {
-       struct hlist_node hlist;
-       int plen;
-       u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
-       struct list_head falh;
+struct tnode {
        struct rcu_head rcu;
+       t_key empty_children;           /* KEYLENGTH bits needed */
+       t_key full_children;            /* KEYLENGTH bits needed */
+       struct key_vector __rcu *parent;
+       struct key_vector kv[1];
+#define tn_bits kv[0].bits
 };
 
+#define TNODE_SIZE(n)  offsetof(struct tnode, kv[0].tnode[n])
+#define LEAF_SIZE      TNODE_SIZE(1)
+
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 struct trie_use_stats {
        unsigned int gets;
@@ -142,13 +140,13 @@ struct trie_stat {
 };
 
 struct trie {
-       struct tnode __rcu *trie;
+       struct key_vector kv[1];
 #ifdef CONFIG_IP_FIB_TRIE_STATS
        struct trie_use_stats __percpu *stats;
 #endif
 };
 
-static void resize(struct trie *t, struct tnode *tn);
+static struct key_vector *resize(struct trie *t, struct key_vector *tn);
 static size_t tnode_free_size;
 
 /*
@@ -161,41 +159,46 @@ static const int sync_pages = 128;
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
 
+static inline struct tnode *tn_info(struct key_vector *kv)
+{
+       return container_of(kv, struct tnode, kv[0]);
+}
+
 /* caller must hold RTNL */
-#define node_parent(n) rtnl_dereference((n)->parent)
+#define node_parent(tn) rtnl_dereference(tn_info(tn)->parent)
+#define get_child(tn, i) rtnl_dereference((tn)->tnode[i])
 
 /* caller must hold RCU read lock or RTNL */
-#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)
+#define node_parent_rcu(tn) rcu_dereference_rtnl(tn_info(tn)->parent)
+#define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i])
 
 /* wrapper for rcu_assign_pointer */
-static inline void node_set_parent(struct tnode *n, struct tnode *tp)
+static inline void node_set_parent(struct key_vector *n, struct key_vector *tp)
 {
        if (n)
-               rcu_assign_pointer(n->parent, tp);
+               rcu_assign_pointer(tn_info(n)->parent, tp);
 }
 
-#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)
+#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p)
 
 /* This provides us with the number of children in this node, in the case of a
  * leaf this will return 0 meaning none of the children are accessible.
  */
-static inline unsigned long tnode_child_length(const struct tnode *tn)
+static inline unsigned long child_length(const struct key_vector *tn)
 {
        return (1ul << tn->bits) & ~(1ul);
 }
 
-/* caller must hold RTNL */
-static inline struct tnode *tnode_get_child(const struct tnode *tn,
-                                           unsigned long i)
-{
-       return rtnl_dereference(tn->child[i]);
-}
+#define get_cindex(key, kv) (((key) ^ (kv)->key) >> (kv)->pos)
 
-/* caller must hold RCU read lock or RTNL */
-static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
-                                               unsigned long i)
+static inline unsigned long get_index(t_key key, struct key_vector *kv)
 {
-       return rcu_dereference_rtnl(tn->child[i]);
+       unsigned long index = key ^ kv->key;
+
+       if ((BITS_PER_LONG <= KEYLENGTH) && (KEYLENGTH == kv->pos))
+               return 0;
+
+       return index >> kv->pos;
 }
 
 /* To understand this stuff, an understanding of keys and all their bits is
@@ -274,106 +277,104 @@ static inline void alias_free_mem_rcu(struct fib_alias *fa)
 }
 
 #define TNODE_KMALLOC_MAX \
-       ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))
+       ilog2((PAGE_SIZE - TNODE_SIZE(0)) / sizeof(struct key_vector *))
+#define TNODE_VMALLOC_MAX \
+       ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *))
 
 static void __node_free_rcu(struct rcu_head *head)
 {
        struct tnode *n = container_of(head, struct tnode, rcu);
 
-       if (IS_LEAF(n))
+       if (!n->tn_bits)
                kmem_cache_free(trie_leaf_kmem, n);
-       else if (n->bits <= TNODE_KMALLOC_MAX)
+       else if (n->tn_bits <= TNODE_KMALLOC_MAX)
                kfree(n);
        else
                vfree(n);
 }
 
-#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)
+#define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu)
 
-static inline void free_leaf_info(struct leaf_info *leaf)
+static struct tnode *tnode_alloc(int bits)
 {
-       kfree_rcu(leaf, rcu);
-}
+       size_t size;
+
+       /* verify bits is within bounds */
+       if (bits > TNODE_VMALLOC_MAX)
+               return NULL;
+
+       /* determine size and verify it is non-zero and didn't overflow */
+       size = TNODE_SIZE(1ul << bits);
 
-static struct tnode *tnode_alloc(size_t size)
-{
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);
        else
                return vzalloc(size);
 }
 
-static inline void empty_child_inc(struct tnode *n)
+static inline void empty_child_inc(struct key_vector *n)
 {
-       ++n->empty_children ? : ++n->full_children;
+       ++tn_info(n)->empty_children ? : ++tn_info(n)->full_children;
 }
 
-static inline void empty_child_dec(struct tnode *n)
+static inline void empty_child_dec(struct key_vector *n)
 {
-       n->empty_children-- ? : n->full_children--;
+       tn_info(n)->empty_children-- ? : tn_info(n)->full_children--;
 }
 
-static struct tnode *leaf_new(t_key key)
+static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
 {
-       struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
-       if (l) {
-               l->parent = NULL;
-               /* set key and pos to reflect full key value
-                * any trailing zeros in the key should be ignored
-                * as the nodes are searched
-                */
-               l->key = key;
-               l->slen = 0;
-               l->pos = 0;
-               /* set bits to 0 indicating we are not a tnode */
-               l->bits = 0;
+       struct tnode *kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
+       struct key_vector *l = kv->kv;
 
-               INIT_HLIST_HEAD(&l->list);
-       }
-       return l;
-}
+       if (!kv)
+               return NULL;
 
-static struct leaf_info *leaf_info_new(int plen)
-{
-       struct leaf_info *li = kmalloc(sizeof(struct leaf_info),  GFP_KERNEL);
-       if (li) {
-               li->plen = plen;
-               li->mask_plen = ntohl(inet_make_mask(plen));
-               INIT_LIST_HEAD(&li->falh);
-       }
-       return li;
+       /* initialize key vector */
+       l->key = key;
+       l->pos = 0;
+       l->bits = 0;
+       l->slen = fa->fa_slen;
+
+       /* link leaf to fib alias */
+       INIT_HLIST_HEAD(&l->leaf);
+       hlist_add_head(&fa->fa_list, &l->leaf);
+
+       return l;
 }
 
-static struct tnode *tnode_new(t_key key, int pos, int bits)
+static struct key_vector *tnode_new(t_key key, int pos, int bits)
 {
-       size_t sz = offsetof(struct tnode, child[1ul << bits]);
-       struct tnode *tn = tnode_alloc(sz);
+       struct tnode *tnode = tnode_alloc(bits);
        unsigned int shift = pos + bits;
+       struct key_vector *tn = tnode->kv;
 
        /* verify bits and pos their msb bits clear and values are valid */
        BUG_ON(!bits || (shift > KEYLENGTH));
 
-       if (tn) {
-               tn->parent = NULL;
-               tn->slen = pos;
-               tn->pos = pos;
-               tn->bits = bits;
-               tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
-               if (bits == KEYLENGTH)
-                       tn->full_children = 1;
-               else
-                       tn->empty_children = 1ul << bits;
-       }
+       pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
+                sizeof(struct key_vector *) << bits);
+
+       if (!tnode)
+               return NULL;
+
+       if (bits == KEYLENGTH)
+               tnode->full_children = 1;
+       else
+               tnode->empty_children = 1ul << bits;
+
+       tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
+       tn->pos = pos;
+       tn->bits = bits;
+       tn->slen = pos;
 
-       pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
-                sizeof(struct tnode *) << bits);
        return tn;
 }
 
 /* Check whether a tnode 'n' is "full", i.e. it is an internal node
  * and no bits are skipped. See discussion in dyntree paper p. 6
  */
-static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
+static inline int tnode_full(struct key_vector *tn, struct key_vector *n)
 {
        return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
 }
@@ -381,17 +382,18 @@ static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
 /* Add a child at position i overwriting the old value.
  * Update the value of full_children and empty_children.
  */
-static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
+static void put_child(struct key_vector *tn, unsigned long i,
+                     struct key_vector *n)
 {
-       struct tnode *chi = tnode_get_child(tn, i);
+       struct key_vector *chi = get_child(tn, i);
        int isfull, wasfull;
 
-       BUG_ON(i >= tnode_child_length(tn));
+       BUG_ON(i >= child_length(tn));
 
        /* update emptyChildren, overflow into fullChildren */
-       if (n == NULL && chi != NULL)
+       if (!n && chi)
                empty_child_inc(tn);
-       if (n != NULL && chi == NULL)
+       if (n && !chi)
                empty_child_dec(tn);
 
        /* update fullChildren */
@@ -399,23 +401,23 @@ static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
        isfull = tnode_full(tn, n);
 
        if (wasfull && !isfull)
-               tn->full_children--;
+               tn_info(tn)->full_children--;
        else if (!wasfull && isfull)
-               tn->full_children++;
+               tn_info(tn)->full_children++;
 
        if (n && (tn->slen < n->slen))
                tn->slen = n->slen;
 
-       rcu_assign_pointer(tn->child[i], n);
+       rcu_assign_pointer(tn->tnode[i], n);
 }
 
-static void update_children(struct tnode *tn)
+static void update_children(struct key_vector *tn)
 {
        unsigned long i;
 
        /* update all of the child parent pointers */
-       for (i = tnode_child_length(tn); i;) {
-               struct tnode *inode = tnode_get_child(tn, --i);
+       for (i = child_length(tn); i;) {
+               struct key_vector *inode = get_child(tn, --i);
 
                if (!inode)
                        continue;
@@ -431,36 +433,37 @@ static void update_children(struct tnode *tn)
        }
 }
 
-static inline void put_child_root(struct tnode *tp, struct trie *t,
-                                 t_key key, struct tnode *n)
+static inline void put_child_root(struct key_vector *tp, t_key key,
+                                 struct key_vector *n)
 {
-       if (tp)
-               put_child(tp, get_index(key, tp), n);
+       if (IS_TRIE(tp))
+               rcu_assign_pointer(tp->tnode[0], n);
        else
-               rcu_assign_pointer(t->trie, n);
+               put_child(tp, get_index(key, tp), n);
 }
 
-static inline void tnode_free_init(struct tnode *tn)
+static inline void tnode_free_init(struct key_vector *tn)
 {
-       tn->rcu.next = NULL;
+       tn_info(tn)->rcu.next = NULL;
 }
 
-static inline void tnode_free_append(struct tnode *tn, struct tnode *n)
+static inline void tnode_free_append(struct key_vector *tn,
+                                    struct key_vector *n)
 {
-       n->rcu.next = tn->rcu.next;
-       tn->rcu.next = &n->rcu;
+       tn_info(n)->rcu.next = tn_info(tn)->rcu.next;
+       tn_info(tn)->rcu.next = &tn_info(n)->rcu;
 }
 
-static void tnode_free(struct tnode *tn)
+static void tnode_free(struct key_vector *tn)
 {
-       struct callback_head *head = &tn->rcu;
+       struct callback_head *head = &tn_info(tn)->rcu;
 
        while (head) {
                head = head->next;
-               tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);
+               tnode_free_size += TNODE_SIZE(1ul << tn->bits);
                node_free(tn);
 
-               tn = container_of(head, struct tnode, rcu);
+               tn = container_of(head, struct tnode, rcu)->kv;
        }
 
        if (tnode_free_size >= PAGE_SIZE * sync_pages) {
@@ -469,14 +472,16 @@ static void tnode_free(struct tnode *tn)
        }
 }
 
-static void replace(struct trie *t, struct tnode *oldtnode, struct tnode *tn)
+static struct key_vector *replace(struct trie *t,
+                                 struct key_vector *oldtnode,
+                                 struct key_vector *tn)
 {
-       struct tnode *tp = node_parent(oldtnode);
+       struct key_vector *tp = node_parent(oldtnode);
        unsigned long i;
 
        /* setup the parent pointer out of and back into this node */
        NODE_INIT_PARENT(tn, tp);
-       put_child_root(tp, t, tn->key, tn);
+       put_child_root(tp, tn->key, tn);
 
        /* update all of the child parent pointers */
        update_children(tn);
@@ -485,18 +490,21 @@ static void replace(struct trie *t, struct tnode *oldtnode, struct tnode *tn)
        tnode_free(oldtnode);
 
        /* resize children now that oldtnode is freed */
-       for (i = tnode_child_length(tn); i;) {
-               struct tnode *inode = tnode_get_child(tn, --i);
+       for (i = child_length(tn); i;) {
+               struct key_vector *inode = get_child(tn, --i);
 
                /* resize child node */
                if (tnode_full(tn, inode))
-                       resize(t, inode);
+                       tn = resize(t, inode);
        }
+
+       return tp;
 }
 
-static int inflate(struct trie *t, struct tnode *oldtnode)
+static struct key_vector *inflate(struct trie *t,
+                                 struct key_vector *oldtnode)
 {
-       struct tnode *tn;
+       struct key_vector *tn;
        unsigned long i;
        t_key m;
 
@@ -504,7 +512,7 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
 
        tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
        if (!tn)
-               return -ENOMEM;
+               goto notnode;
 
        /* prepare oldtnode to be freed */
        tnode_free_init(oldtnode);
@@ -514,13 +522,13 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
         * point to existing tnodes and the links between our allocated
         * nodes.
         */
-       for (i = tnode_child_length(oldtnode), m = 1u << tn->pos; i;) {
-               struct tnode *inode = tnode_get_child(oldtnode, --i);
-               struct tnode *node0, *node1;
+       for (i = child_length(oldtnode), m = 1u << tn->pos; i;) {
+               struct key_vector *inode = get_child(oldtnode, --i);
+               struct key_vector *node0, *node1;
                unsigned long j, k;
 
                /* An empty child */
-               if (inode == NULL)
+               if (!inode)
                        continue;
 
                /* A leaf or an internal node with skipped bits */
@@ -534,8 +542,8 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
 
                /* An internal node with two children */
                if (inode->bits == 1) {
-                       put_child(tn, 2 * i + 1, tnode_get_child(inode, 1));
-                       put_child(tn, 2 * i, tnode_get_child(inode, 0));
+                       put_child(tn, 2 * i + 1, get_child(inode, 1));
+                       put_child(tn, 2 * i, get_child(inode, 0));
                        continue;
                }
 
@@ -564,11 +572,11 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
                tnode_free_append(tn, node0);
 
                /* populate child pointers in new nodes */
-               for (k = tnode_child_length(inode), j = k / 2; j;) {
-                       put_child(node1, --j, tnode_get_child(inode, --k));
-                       put_child(node0, j, tnode_get_child(inode, j));
-                       put_child(node1, --j, tnode_get_child(inode, --k));
-                       put_child(node0, j, tnode_get_child(inode, j));
+               for (k = child_length(inode), j = k / 2; j;) {
+                       put_child(node1, --j, get_child(inode, --k));
+                       put_child(node0, j, get_child(inode, j));
+                       put_child(node1, --j, get_child(inode, --k));
+                       put_child(node0, j, get_child(inode, j));
                }
 
                /* link new nodes to parent */
@@ -581,25 +589,25 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
        }
 
        /* setup the parent pointers into and out of this node */
-       replace(t, oldtnode, tn);
-
-       return 0;
+       return replace(t, oldtnode, tn);
 nomem:
        /* all pointers should be clean so we are done */
        tnode_free(tn);
-       return -ENOMEM;
+notnode:
+       return NULL;
 }
 
-static int halve(struct trie *t, struct tnode *oldtnode)
+static struct key_vector *halve(struct trie *t,
+                               struct key_vector *oldtnode)
 {
-       struct tnode *tn;
+       struct key_vector *tn;
        unsigned long i;
 
        pr_debug("In halve\n");
 
        tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
        if (!tn)
-               return -ENOMEM;
+               goto notnode;
 
        /* prepare oldtnode to be freed */
        tnode_free_init(oldtnode);
@@ -609,10 +617,10 @@ static int halve(struct trie *t, struct tnode *oldtnode)
         * point to existing tnodes and the links between our allocated
         * nodes.
         */
-       for (i = tnode_child_length(oldtnode); i;) {
-               struct tnode *node1 = tnode_get_child(oldtnode, --i);
-               struct tnode *node0 = tnode_get_child(oldtnode, --i);
-               struct tnode *inode;
+       for (i = child_length(oldtnode); i;) {
+               struct key_vector *node1 = get_child(oldtnode, --i);
+               struct key_vector *node0 = get_child(oldtnode, --i);
+               struct key_vector *inode;
 
                /* At least one of the children is empty */
                if (!node1 || !node0) {
@@ -622,10 +630,8 @@ static int halve(struct trie *t, struct tnode *oldtnode)
 
                /* Two nonempty children */
                inode = tnode_new(node0->key, oldtnode->pos, 1);
-               if (!inode) {
-                       tnode_free(tn);
-                       return -ENOMEM;
-               }
+               if (!inode)
+                       goto nomem;
                tnode_free_append(tn, inode);
 
                /* initialize pointers out of node */
@@ -638,30 +644,36 @@ static int halve(struct trie *t, struct tnode *oldtnode)
        }
 
        /* setup the parent pointers into and out of this node */
-       replace(t, oldtnode, tn);
-
-       return 0;
+       return replace(t, oldtnode, tn);
+nomem:
+       /* all pointers should be clean so we are done */
+       tnode_free(tn);
+notnode:
+       return NULL;
 }
 
-static void collapse(struct trie *t, struct tnode *oldtnode)
+static struct key_vector *collapse(struct trie *t,
+                                  struct key_vector *oldtnode)
 {
-       struct tnode *n, *tp;
+       struct key_vector *n, *tp;
        unsigned long i;
 
        /* scan the tnode looking for that one child that might still exist */
-       for (n = NULL, i = tnode_child_length(oldtnode); !n && i;)
-               n = tnode_get_child(oldtnode, --i);
+       for (n = NULL, i = child_length(oldtnode); !n && i;)
+               n = get_child(oldtnode, --i);
 
        /* compress one level */
        tp = node_parent(oldtnode);
-       put_child_root(tp, t, oldtnode->key, n);
+       put_child_root(tp, oldtnode->key, n);
        node_set_parent(n, tp);
 
        /* drop dead node */
        node_free(oldtnode);
+
+       return tp;
 }
 
-static unsigned char update_suffix(struct tnode *tn)
+static unsigned char update_suffix(struct key_vector *tn)
 {
        unsigned char slen = tn->pos;
        unsigned long stride, i;
@@ -671,8 +683,8 @@ static unsigned char update_suffix(struct tnode *tn)
         * why we start with a stride of 2 since a stride of 1 would
         * represent the nodes with suffix length equal to tn->pos
         */
-       for (i = 0, stride = 0x2ul ; i < tnode_child_length(tn); i += stride) {
-               struct tnode *n = tnode_get_child(tn, i);
+       for (i = 0, stride = 0x2ul ; i < child_length(tn); i += stride) {
+               struct key_vector *n = get_child(tn, i);
 
                if (!n || (n->slen <= slen))
                        continue;
@@ -704,12 +716,12 @@ static unsigned char update_suffix(struct tnode *tn)
  *
  * 'high' in this instance is the variable 'inflate_threshold'. It
  * is expressed as a percentage, so we multiply it with
- * tnode_child_length() and instead of multiplying by 2 (since the
+ * child_length() and instead of multiplying by 2 (since the
  * child array will be doubled by inflate()) and multiplying
  * the left-hand side by 100 (to handle the percentage thing) we
  * multiply the left-hand side by 50.
  *
- * The left-hand side may look a bit weird: tnode_child_length(tn)
+ * The left-hand side may look a bit weird: child_length(tn)
  * - tn->empty_children is of course the number of non-null children
  * in the current node. tn->full_children is the number of "full"
  * children, that is non-null tnodes with a skip value of 0.
@@ -719,10 +731,10 @@ static unsigned char update_suffix(struct tnode *tn)
  * A clearer way to write this would be:
  *
  * to_be_doubled = tn->full_children;
- * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
+ * not_to_be_doubled = child_length(tn) - tn->empty_children -
  *     tn->full_children;
  *
- * new_child_length = tnode_child_length(tn) * 2;
+ * new_child_length = child_length(tn) * 2;
  *
  * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
  *      new_child_length;
@@ -739,57 +751,57 @@ static unsigned char update_suffix(struct tnode *tn)
  *      inflate_threshold * new_child_length
  *
  * expand not_to_be_doubled and to_be_doubled, and shorten:
- * 100 * (tnode_child_length(tn) - tn->empty_children +
+ * 100 * (child_length(tn) - tn->empty_children +
  *    tn->full_children) >= inflate_threshold * new_child_length
  *
  * expand new_child_length:
- * 100 * (tnode_child_length(tn) - tn->empty_children +
+ * 100 * (child_length(tn) - tn->empty_children +
  *    tn->full_children) >=
- *      inflate_threshold * tnode_child_length(tn) * 2
+ *      inflate_threshold * child_length(tn) * 2
  *
  * shorten again:
- * 50 * (tn->full_children + tnode_child_length(tn) -
+ * 50 * (tn->full_children + child_length(tn) -
  *    tn->empty_children) >= inflate_threshold *
- *    tnode_child_length(tn)
+ *    child_length(tn)
  *
  */
-static bool should_inflate(const struct tnode *tp, const struct tnode *tn)
+static inline bool should_inflate(struct key_vector *tp, struct key_vector *tn)
 {
-       unsigned long used = tnode_child_length(tn);
+       unsigned long used = child_length(tn);
        unsigned long threshold = used;
 
        /* Keep root node larger */
-       threshold *= tp ? inflate_threshold : inflate_threshold_root;
-       used -= tn->empty_children;
-       used += tn->full_children;
+       threshold *= IS_TRIE(tp) ? inflate_threshold_root : inflate_threshold;
+       used -= tn_info(tn)->empty_children;
+       used += tn_info(tn)->full_children;
 
        /* if bits == KEYLENGTH then pos = 0, and will fail below */
 
        return (used > 1) && tn->pos && ((50 * used) >= threshold);
 }
 
-static bool should_halve(const struct tnode *tp, const struct tnode *tn)
+static inline bool should_halve(struct key_vector *tp, struct key_vector *tn)
 {
-       unsigned long used = tnode_child_length(tn);
+       unsigned long used = child_length(tn);
        unsigned long threshold = used;
 
        /* Keep root node larger */
-       threshold *= tp ? halve_threshold : halve_threshold_root;
-       used -= tn->empty_children;
+       threshold *= IS_TRIE(tp) ? halve_threshold_root : halve_threshold;
+       used -= tn_info(tn)->empty_children;
 
        /* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */
 
        return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold);
 }
 
-static bool should_collapse(const struct tnode *tn)
+static inline bool should_collapse(struct key_vector *tn)
 {
-       unsigned long used = tnode_child_length(tn);
+       unsigned long used = child_length(tn);
 
-       used -= tn->empty_children;
+       used -= tn_info(tn)->empty_children;
 
        /* account for bits == KEYLENGTH case */
-       if ((tn->bits == KEYLENGTH) && tn->full_children)
+       if ((tn->bits == KEYLENGTH) && tn_info(tn)->full_children)
                used -= KEY_MAX;
 
        /* One child or none, time to drop us from the trie */
@@ -797,10 +809,13 @@ static bool should_collapse(const struct tnode *tn)
 }
 
 #define MAX_WORK 10
-static void resize(struct trie *t, struct tnode *tn)
+static struct key_vector *resize(struct trie *t, struct key_vector *tn)
 {
-       struct tnode *tp = node_parent(tn);
-       struct tnode __rcu **cptr;
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+       struct trie_use_stats __percpu *stats = t->stats;
+#endif
+       struct key_vector *tp = node_parent(tn);
+       unsigned long cindex = get_index(tn->key, tp);
        int max_work = MAX_WORK;
 
        pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
@@ -810,183 +825,128 @@ static void resize(struct trie *t, struct tnode *tn)
         * doing it ourselves.  This way we can let RCU fully do its
         * thing without us interfering
         */
-       cptr = tp ? &tp->child[get_index(tn->key, tp)] : &t->trie;
-       BUG_ON(tn != rtnl_dereference(*cptr));
+       BUG_ON(tn != get_child(tp, cindex));
 
        /* Double as long as the resulting node has a number of
         * nonempty nodes that are above the threshold.
         */
        while (should_inflate(tp, tn) && max_work) {
-               if (inflate(t, tn)) {
+               tp = inflate(t, tn);
+               if (!tp) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-                       this_cpu_inc(t->stats->resize_node_skipped);
+                       this_cpu_inc(stats->resize_node_skipped);
 #endif
                        break;
                }
 
                max_work--;
-               tn = rtnl_dereference(*cptr);
+               tn = get_child(tp, cindex);
        }
 
+       /* update parent in case inflate failed */
+       tp = node_parent(tn);
+
        /* Return if at least one inflate is run */
        if (max_work != MAX_WORK)
-               return;
+               return tp;
 
        /* Halve as long as the number of empty children in this
         * node is above threshold.
         */
        while (should_halve(tp, tn) && max_work) {
-               if (halve(t, tn)) {
+               tp = halve(t, tn);
+               if (!tp) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-                       this_cpu_inc(t->stats->resize_node_skipped);
+                       this_cpu_inc(stats->resize_node_skipped);
 #endif
                        break;
                }
 
                max_work--;
-               tn = rtnl_dereference(*cptr);
+               tn = get_child(tp, cindex);
        }
 
        /* Only one child remains */
-       if (should_collapse(tn)) {
-               collapse(t, tn);
-               return;
-       }
+       if (should_collapse(tn))
+               return collapse(t, tn);
+
+       /* update parent in case halve failed */
+       tp = node_parent(tn);
 
        /* Return if at least one deflate was run */
        if (max_work != MAX_WORK)
-               return;
+               return tp;
 
        /* push the suffix length to the parent node */
        if (tn->slen > tn->pos) {
                unsigned char slen = update_suffix(tn);
 
-               if (tp && (slen > tp->slen))
+               if (slen > tp->slen)
                        tp->slen = slen;
        }
-}
-
-/* readside must use rcu_read_lock currently dump routines
- via get_fa_head and dump */
-
-static struct leaf_info *find_leaf_info(struct tnode *l, int plen)
-{
-       struct hlist_head *head = &l->list;
-       struct leaf_info *li;
-
-       hlist_for_each_entry_rcu(li, head, hlist)
-               if (li->plen == plen)
-                       return li;
-
-       return NULL;
-}
-
-static inline struct list_head *get_fa_head(struct tnode *l, int plen)
-{
-       struct leaf_info *li = find_leaf_info(l, plen);
-
-       if (!li)
-               return NULL;
 
-       return &li->falh;
+       return tp;
 }
 
-static void leaf_pull_suffix(struct tnode *l)
+static void leaf_pull_suffix(struct key_vector *tp, struct key_vector *l)
 {
-       struct tnode *tp = node_parent(l);
-
-       while (tp && (tp->slen > tp->pos) && (tp->slen > l->slen)) {
+       while ((tp->slen > tp->pos) && (tp->slen > l->slen)) {
                if (update_suffix(tp) > l->slen)
                        break;
                tp = node_parent(tp);
        }
 }
 
-static void leaf_push_suffix(struct tnode *l)
+static void leaf_push_suffix(struct key_vector *tn, struct key_vector *l)
 {
-       struct tnode *tn = node_parent(l);
-
        /* if this is a new leaf then tn will be NULL and we can sort
         * out parent suffix lengths as a part of trie_rebalance
         */
-       while (tn && (tn->slen < l->slen)) {
+       while (tn->slen < l->slen) {
                tn->slen = l->slen;
                tn = node_parent(tn);
        }
 }
 
-static void remove_leaf_info(struct tnode *l, struct leaf_info *old)
-{
-       /* record the location of the previous list_info entry */
-       struct hlist_node **pprev = old->hlist.pprev;
-       struct leaf_info *li = hlist_entry(pprev, typeof(*li), hlist.next);
-
-       /* remove the leaf info from the list */
-       hlist_del_rcu(&old->hlist);
-
-       /* only access li if it is pointing at the last valid hlist_node */
-       if (hlist_empty(&l->list) || (*pprev))
-               return;
-
-       /* update the trie with the latest suffix length */
-       l->slen = KEYLENGTH - li->plen;
-       leaf_pull_suffix(l);
-}
-
-static void insert_leaf_info(struct tnode *l, struct leaf_info *new)
+/* rcu_read_lock needs to be hold by caller from readside */
+static struct key_vector *fib_find_node(struct trie *t,
+                                       struct key_vector **tp, u32 key)
 {
-       struct hlist_head *head = &l->list;
-       struct leaf_info *li = NULL, *last = NULL;
+       struct key_vector *pn, *n = t->kv;
+       unsigned long index = 0;
 
-       if (hlist_empty(head)) {
-               hlist_add_head_rcu(&new->hlist, head);
-       } else {
-               hlist_for_each_entry(li, head, hlist) {
-                       if (new->plen > li->plen)
-                               break;
-
-                       last = li;
-               }
-               if (last)
-                       hlist_add_behind_rcu(&new->hlist, &last->hlist);
-               else
-                       hlist_add_before_rcu(&new->hlist, &li->hlist);
-       }
-
-       /* if we added to the tail node then we need to update slen */
-       if (l->slen < (KEYLENGTH - new->plen)) {
-               l->slen = KEYLENGTH - new->plen;
-               leaf_push_suffix(l);
-       }
-}
+       do {
+               pn = n;
+               n = get_child_rcu(n, index);
 
-/* rcu_read_lock needs to be hold by caller from readside */
-static struct tnode *fib_find_node(struct trie *t, u32 key)
-{
-       struct tnode *n = rcu_dereference_rtnl(t->trie);
+               if (!n)
+                       break;
 
-       while (n) {
-               unsigned long index = get_index(key, n);
+               index = get_cindex(key, n);
 
                /* This bit of code is a bit tricky but it combines multiple
                 * checks into a single check.  The prefix consists of the
                 * prefix plus zeros for the bits in the cindex. The index
                 * is the difference between the key and this value.  From
                 * this we can actually derive several pieces of data.
-                *   if (index & (~0ul << bits))
+                *   if (index >= (1ul << bits))
                 *     we have a mismatch in skip bits and failed
                 *   else
                 *     we know the value is cindex
+                *
+                * This check is safe even if bits == KEYLENGTH due to the
+                * fact that we can only allocate a node with 32 bits if a
+                * long is greater than 32 bits.
                 */
-               if (index & (~0ul << n->bits))
-                       return NULL;
-
-               /* we have found a leaf. Prefixes have already been compared */
-               if (IS_LEAF(n))
+               if (index >= (1ul << n->bits)) {
+                       n = NULL;
                        break;
+               }
 
-               n = tnode_get_child_rcu(n, index);
-       }
+               /* keep searching until we find a perfect match leaf or NULL */
+       } while (IS_TNODE(n));
+
+       *tp = pn;
 
        return n;
 }
@@ -994,14 +954,23 @@ static struct tnode *fib_find_node(struct trie *t, u32 key)
 /* Return the first fib alias matching TOS with
  * priority less than or equal to PRIO.
  */
-static struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
+static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen,
+                                       u8 tos, u32 prio, u32 tb_id)
 {
        struct fib_alias *fa;
 
        if (!fah)
                return NULL;
 
-       list_for_each_entry(fa, fah, fa_list) {
+       hlist_for_each_entry(fa, fah, fa_list) {
+               if (fa->fa_slen < slen)
+                       continue;
+               if (fa->fa_slen != slen)
+                       break;
+               if (fa->tb_id > tb_id)
+                       continue;
+               if (fa->tb_id != tb_id)
+                       break;
                if (fa->fa_tos > tos)
                        continue;
                if (fa->fa_info->fib_priority >= prio || fa->fa_tos < tos)
@@ -1011,77 +980,23 @@ static struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
        return NULL;
 }
 
-static void trie_rebalance(struct trie *t, struct tnode *tn)
+static void trie_rebalance(struct trie *t, struct key_vector *tn)
 {
-       struct tnode *tp;
-
-       while ((tp = node_parent(tn)) != NULL) {
-               resize(t, tn);
-               tn = tp;
-       }
-
-       /* Handle last (top) tnode */
-       if (IS_TNODE(tn))
-               resize(t, tn);
+       while (!IS_TRIE(tn))
+               tn = resize(t, tn);
 }
 
-/* only used from updater-side */
-
-static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
+static int fib_insert_node(struct trie *t, struct key_vector *tp,
+                          struct fib_alias *new, t_key key)
 {
-       struct list_head *fa_head = NULL;
-       struct tnode *l, *n, *tp = NULL;
-       struct leaf_info *li;
-
-       li = leaf_info_new(plen);
-       if (!li)
-               return NULL;
-       fa_head = &li->falh;
+       struct key_vector *n, *l;
 
-       n = rtnl_dereference(t->trie);
-
-       /* If we point to NULL, stop. Either the tree is empty and we should
-        * just put a new leaf in if, or we have reached an empty child slot,
-        * and we should just put our new leaf in that.
-        *
-        * If we hit a node with a key that does't match then we should stop
-        * and create a new tnode to replace that node and insert ourselves
-        * and the other node into the new tnode.
-        */
-       while (n) {
-               unsigned long index = get_index(key, n);
-
-               /* This bit of code is a bit tricky but it combines multiple
-                * checks into a single check.  The prefix consists of the
-                * prefix plus zeros for the "bits" in the prefix. The index
-                * is the difference between the key and this value.  From
-                * this we can actually derive several pieces of data.
-                *   if !(index >> bits)
-                *     we know the value is child index
-                *   else
-                *     we have a mismatch in skip bits and failed
-                */
-               if (index >> n->bits)
-                       break;
-
-               /* we have found a leaf. Prefixes have already been compared */
-               if (IS_LEAF(n)) {
-                       /* Case 1: n is a leaf, and prefixes match*/
-                       insert_leaf_info(n, li);
-                       return fa_head;
-               }
-
-               tp = n;
-               n = tnode_get_child_rcu(n, index);
-       }
-
-       l = leaf_new(key);
-       if (!l) {
-               free_leaf_info(li);
-               return NULL;
-       }
+       l = leaf_new(key, new);
+       if (!l)
+               goto noleaf;
 
-       insert_leaf_info(l, li);
+       /* retrieve child from parent node */
+       n = get_child(tp, get_index(key, tp));
 
        /* Case 2: n is a LEAF or a TNODE and the key doesn't match.
         *
@@ -1090,21 +1005,18 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
         *  leaves us in position for handling as case 3
         */
        if (n) {
-               struct tnode *tn;
+               struct key_vector *tn;
 
                tn = tnode_new(key, __fls(key ^ n->key), 1);
-               if (!tn) {
-                       free_leaf_info(li);
-                       node_free(l);
-                       return NULL;
-               }
+               if (!tn)
+                       goto notnode;
 
                /* initialize routes out of node */
                NODE_INIT_PARENT(tn, tp);
                put_child(tn, get_index(key, tn) ^ 1, n);
 
                /* start adding routes into the node */
-               put_child_root(tp, t, key, tn);
+               put_child_root(tp, key, tn);
                node_set_parent(n, tn);
 
                /* parent now has a NULL spot where the leaf can go */
@@ -1112,69 +1024,93 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
        }
 
        /* Case 3: n is NULL, and will just insert a new leaf */
-       if (tp) {
-               NODE_INIT_PARENT(l, tp);
-               put_child(tp, get_index(key, tp), l);
-               trie_rebalance(t, tp);
+       NODE_INIT_PARENT(l, tp);
+       put_child_root(tp, key, l);
+       trie_rebalance(t, tp);
+
+       return 0;
+notnode:
+       node_free(l);
+noleaf:
+       return -ENOMEM;
+}
+
+static int fib_insert_alias(struct trie *t, struct key_vector *tp,
+                           struct key_vector *l, struct fib_alias *new,
+                           struct fib_alias *fa, t_key key)
+{
+       if (!l)
+               return fib_insert_node(t, tp, new, key);
+
+       if (fa) {
+               hlist_add_before_rcu(&new->fa_list, &fa->fa_list);
        } else {
-               rcu_assign_pointer(t->trie, l);
+               struct fib_alias *last;
+
+               hlist_for_each_entry(last, &l->leaf, fa_list) {
+                       if (new->fa_slen < last->fa_slen)
+                               break;
+                       if ((new->fa_slen == last->fa_slen) &&
+                           (new->tb_id > last->tb_id))
+                               break;
+                       fa = last;
+               }
+
+               if (fa)
+                       hlist_add_behind_rcu(&new->fa_list, &fa->fa_list);
+               else
+                       hlist_add_head_rcu(&new->fa_list, &l->leaf);
        }
 
-       return fa_head;
+       /* if we added to the tail node then we need to update slen */
+       if (l->slen < new->fa_slen) {
+               l->slen = new->fa_slen;
+               leaf_push_suffix(tp, l);
+       }
+
+       return 0;
 }
 
-/*
- * Caller must hold RTNL.
- */
+/* Caller must hold RTNL. */
 int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 {
-       struct trie *t = (struct trie *) tb->tb_data;
+       struct trie *t = (struct trie *)tb->tb_data;
        struct fib_alias *fa, *new_fa;
-       struct list_head *fa_head = NULL;
+       struct key_vector *l, *tp;
        struct fib_info *fi;
-       int plen = cfg->fc_dst_len;
+       u8 plen = cfg->fc_dst_len;
+       u8 slen = KEYLENGTH - plen;
        u8 tos = cfg->fc_tos;
-       u32 key, mask;
+       u32 key;
        int err;
-       struct tnode *l;
 
-       if (plen > 32)
+       if (plen > KEYLENGTH)
                return -EINVAL;
 
        key = ntohl(cfg->fc_dst);
 
        pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
 
-       mask = ntohl(inet_make_mask(plen));
-
-       if (key & ~mask)
+       if ((plen < KEYLENGTH) && (key << plen))
                return -EINVAL;
 
-       key = key & mask;
-
        fi = fib_create_info(cfg);
        if (IS_ERR(fi)) {
                err = PTR_ERR(fi);
                goto err;
        }
 
-       l = fib_find_node(t, key);
-       fa = NULL;
-
-       if (l) {
-               fa_head = get_fa_head(l, plen);
-               fa = fib_find_alias(fa_head, tos, fi->fib_priority);
-       }
+       l = fib_find_node(t, &tp, key);
+       fa = l ? fib_find_alias(&l->leaf, slen, tos, fi->fib_priority,
+                               tb->tb_id) : NULL;
 
        /* Now fa, if non-NULL, points to the first fib alias
         * with the same keys [prefix,tos,priority], if such key already
         * exists or to the node before which we will insert new one.
         *
         * If fa is NULL, we will need to allocate a new one and
-        * insert to the head of f.
-        *
-        * If f is NULL, no fib node matched the destination key
-        * and we need to allocate a new one of those as well.
+        * insert to the tail of the section matching the suffix length
+        * of the new alias.
         */
 
        if (fa && fa->fa_tos == tos &&
@@ -1192,9 +1128,10 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                 */
                fa_match = NULL;
                fa_first = fa;
-               fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
-               list_for_each_entry_continue(fa, fa_head, fa_list) {
-                       if (fa->fa_tos != tos)
+               hlist_for_each_entry_from(fa, fa_list) {
+                       if ((fa->fa_slen != slen) ||
+                           (fa->tb_id != tb->tb_id) ||
+                           (fa->fa_tos != tos))
                                break;
                        if (fa->fa_info->fib_priority != fi->fib_priority)
                                break;
@@ -1217,7 +1154,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                        }
                        err = -ENOBUFS;
                        new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-                       if (new_fa == NULL)
+                       if (!new_fa)
                                goto out;
 
                        fi_drop = fa->fa_info;
@@ -1226,8 +1163,21 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                        new_fa->fa_type = cfg->fc_type;
                        state = fa->fa_state;
                        new_fa->fa_state = state & ~FA_S_ACCESSED;
+                       new_fa->fa_slen = fa->fa_slen;
+
+                       err = netdev_switch_fib_ipv4_add(key, plen, fi,
+                                                        new_fa->fa_tos,
+                                                        cfg->fc_type,
+                                                        cfg->fc_nlflags,
+                                                        tb->tb_id);
+                       if (err) {
+                               netdev_switch_fib_ipv4_abort(fi);
+                               kmem_cache_free(fn_alias_kmem, new_fa);
+                               goto out;
+                       }
+
+                       hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
 
-                       list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
                        alias_free_mem_rcu(fa);
 
                        fib_release_info(fi_drop);
@@ -1254,37 +1204,42 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 
        err = -ENOBUFS;
        new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-       if (new_fa == NULL)
+       if (!new_fa)
                goto out;
 
        new_fa->fa_info = fi;
        new_fa->fa_tos = tos;
        new_fa->fa_type = cfg->fc_type;
        new_fa->fa_state = 0;
-       /*
-        * Insert new entry to the list.
-        */
-
-       if (!fa_head) {
-               fa_head = fib_insert_node(t, key, plen);
-               if (unlikely(!fa_head)) {
-                       err = -ENOMEM;
-                       goto out_free_new_fa;
-               }
+       new_fa->fa_slen = slen;
+       new_fa->tb_id = tb->tb_id;
+
+       /* (Optionally) offload fib entry to switch hardware. */
+       err = netdev_switch_fib_ipv4_add(key, plen, fi, tos,
+                                        cfg->fc_type,
+                                        cfg->fc_nlflags,
+                                        tb->tb_id);
+       if (err) {
+               netdev_switch_fib_ipv4_abort(fi);
+               goto out_free_new_fa;
        }
 
+       /* Insert new entry to the list. */
+       err = fib_insert_alias(t, tp, l, new_fa, fa, key);
+       if (err)
+               goto out_sw_fib_del;
+
        if (!plen)
                tb->tb_num_default++;
 
-       list_add_tail_rcu(&new_fa->fa_list,
-                         (fa ? &fa->fa_list : fa_head));
-
        rt_cache_flush(cfg->fc_nlinfo.nl_net);
-       rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
+       rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
                  &cfg->fc_nlinfo, 0);
 succeeded:
        return 0;
 
+out_sw_fib_del:
+       netdev_switch_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
 out_free_new_fa:
        kmem_cache_free(fn_alias_kmem, new_fa);
 out:
@@ -1293,7 +1248,7 @@ err:
        return err;
 }
 
-static inline t_key prefix_mismatch(t_key key, struct tnode *n)
+static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
 {
        t_key prefix = n->key;
 
@@ -1304,16 +1259,20 @@ static inline t_key prefix_mismatch(t_key key, struct tnode *n)
 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                     struct fib_result *res, int fib_flags)
 {
-       struct trie *t = (struct trie *)tb->tb_data;
+       struct trie *t = (struct trie *) tb->tb_data;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
        struct trie_use_stats __percpu *stats = t->stats;
 #endif
        const t_key key = ntohl(flp->daddr);
-       struct tnode *n, *pn;
-       struct leaf_info *li;
+       struct key_vector *n, *pn;
+       struct fib_alias *fa;
+       unsigned long index;
        t_key cindex;
 
-       n = rcu_dereference(t->trie);
+       pn = t->kv;
+       cindex = 0;
+
+       n = get_child_rcu(pn, cindex);
        if (!n)
                return -EAGAIN;
 
@@ -1321,24 +1280,25 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
        this_cpu_inc(stats->gets);
 #endif
 
-       pn = n;
-       cindex = 0;
-
        /* Step 1: Travel to the longest prefix match in the trie */
        for (;;) {
-               unsigned long index = get_index(key, n);
+               index = get_cindex(key, n);
 
                /* This bit of code is a bit tricky but it combines multiple
                 * checks into a single check.  The prefix consists of the
                 * prefix plus zeros for the "bits" in the prefix. The index
                 * is the difference between the key and this value.  From
                 * this we can actually derive several pieces of data.
-                *   if (index & (~0ul << bits))
+                *   if (index >= (1ul << bits))
                 *     we have a mismatch in skip bits and failed
                 *   else
                 *     we know the value is cindex
+                *
+                * This check is safe even if bits == KEYLENGTH due to the
+                * fact that we can only allocate a node with 32 bits if a
+                * long is greater than 32 bits.
                 */
-               if (index & (~0ul << n->bits))
+               if (index >= (1ul << n->bits))
                        break;
 
                /* we have found a leaf. Prefixes have already been compared */
@@ -1353,7 +1313,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                        cindex = index;
                }
 
-               n = tnode_get_child_rcu(n, index);
+               n = get_child_rcu(n, index);
                if (unlikely(!n))
                        goto backtrace;
        }
@@ -1361,7 +1321,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
        /* Step 2: Sort out leaves and begin backtracing for longest prefix */
        for (;;) {
                /* record the pointer where our next node pointer is stored */
-               struct tnode __rcu **cptr = n->child;
+               struct key_vector __rcu **cptr = n->tnode;
 
                /* This test verifies that none of the bits that differ
                 * between the key and the prefix exist in the region of
@@ -1393,13 +1353,17 @@ backtrace:
                        while (!cindex) {
                                t_key pkey = pn->key;
 
-                               pn = node_parent_rcu(pn);
-                               if (unlikely(!pn))
+                               /* If we don't have a parent then there is
+                                * nothing for us to do as we do not have any
+                                * further nodes to parse.
+                                */
+                               if (IS_TRIE(pn))
                                        return -EAGAIN;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
                                this_cpu_inc(stats->backtrack);
 #endif
                                /* Get Child's index */
+                               pn = node_parent_rcu(pn);
                                cindex = get_index(pkey, pn);
                        }
 
@@ -1407,138 +1371,134 @@ backtrace:
                        cindex &= cindex - 1;
 
                        /* grab pointer for next child node */
-                       cptr = &pn->child[cindex];
+                       cptr = &pn->tnode[cindex];
                }
        }
 
 found:
+       /* this line carries forward the xor from earlier in the function */
+       index = key ^ n->key;
+
        /* Step 3: Process the leaf, if that fails fall back to backtracing */
-       hlist_for_each_entry_rcu(li, &n->list, hlist) {
-               struct fib_alias *fa;
+       hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
+               struct fib_info *fi = fa->fa_info;
+               int nhsel, err;
 
-               if ((key ^ n->key) & li->mask_plen)
+               if ((index >= (1ul << fa->fa_slen)) &&
+                   ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen != KEYLENGTH)))
                        continue;
-
-               list_for_each_entry_rcu(fa, &li->falh, fa_list) {
-                       struct fib_info *fi = fa->fa_info;
-                       int nhsel, err;
-
-                       if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
-                               continue;
-                       if (fi->fib_dead)
-                               continue;
-                       if (fa->fa_info->fib_scope < flp->flowi4_scope)
-                               continue;
-                       fib_alias_accessed(fa);
-                       err = fib_props[fa->fa_type].error;
-                       if (unlikely(err < 0)) {
+               if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
+                       continue;
+               if (fi->fib_dead)
+                       continue;
+               if (fa->fa_info->fib_scope < flp->flowi4_scope)
+                       continue;
+               fib_alias_accessed(fa);
+               err = fib_props[fa->fa_type].error;
+               if (unlikely(err < 0)) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-                               this_cpu_inc(stats->semantic_match_passed);
+                       this_cpu_inc(stats->semantic_match_passed);
 #endif
-                               return err;
-                       }
-                       if (fi->fib_flags & RTNH_F_DEAD)
+                       return err;
+               }
+               if (fi->fib_flags & RTNH_F_DEAD)
+                       continue;
+               for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
+                       const struct fib_nh *nh = &fi->fib_nh[nhsel];
+
+                       if (nh->nh_flags & RTNH_F_DEAD)
                                continue;
-                       for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
-                               const struct fib_nh *nh = &fi->fib_nh[nhsel];
-
-                               if (nh->nh_flags & RTNH_F_DEAD)
-                                       continue;
-                               if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
-                                       continue;
-
-                               if (!(fib_flags & FIB_LOOKUP_NOREF))
-                                       atomic_inc(&fi->fib_clntref);
-
-                               res->prefixlen = li->plen;
-                               res->nh_sel = nhsel;
-                               res->type = fa->fa_type;
-                               res->scope = fi->fib_scope;
-                               res->fi = fi;
-                               res->table = tb;
-                               res->fa_head = &li->falh;
+                       if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
+                               continue;
+
+                       if (!(fib_flags & FIB_LOOKUP_NOREF))
+                               atomic_inc(&fi->fib_clntref);
+
+                       res->prefixlen = KEYLENGTH - fa->fa_slen;
+                       res->nh_sel = nhsel;
+                       res->type = fa->fa_type;
+                       res->scope = fi->fib_scope;
+                       res->fi = fi;
+                       res->table = tb;
+                       res->fa_head = &n->leaf;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-                               this_cpu_inc(stats->semantic_match_passed);
+                       this_cpu_inc(stats->semantic_match_passed);
 #endif
-                               return err;
-                       }
+                       return err;
                }
-
+       }
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-               this_cpu_inc(stats->semantic_match_miss);
+       this_cpu_inc(stats->semantic_match_miss);
 #endif
-       }
        goto backtrace;
 }
 EXPORT_SYMBOL_GPL(fib_table_lookup);
 
-/*
- * Remove the leaf and return parent.
- */
-static void trie_leaf_remove(struct trie *t, struct tnode *l)
+static void fib_remove_alias(struct trie *t, struct key_vector *tp,
+                            struct key_vector *l, struct fib_alias *old)
 {
-       struct tnode *tp = node_parent(l);
+       /* record the location of the previous list_info entry */
+       struct hlist_node **pprev = old->fa_list.pprev;
+       struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next);
 
-       pr_debug("entering trie_leaf_remove(%p)\n", l);
+       /* remove the fib_alias from the list */
+       hlist_del_rcu(&old->fa_list);
 
-       if (tp) {
-               put_child(tp, get_index(l->key, tp), NULL);
+       /* if we emptied the list this leaf will be freed and we can sort
+        * out parent suffix lengths as a part of trie_rebalance
+        */
+       if (hlist_empty(&l->leaf)) {
+               put_child_root(tp, l->key, NULL);
+               node_free(l);
                trie_rebalance(t, tp);
-       } else {
-               RCU_INIT_POINTER(t->trie, NULL);
+               return;
        }
 
-       node_free(l);
+       /* only access fa if it is pointing at the last valid hlist_node */
+       if (*pprev)
+               return;
+
+       /* update the trie with the latest suffix length */
+       l->slen = fa->fa_slen;
+       leaf_pull_suffix(tp, l);
 }
 
-/*
- * Caller must hold RTNL.
- */
+/* Caller must hold RTNL. */
 int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
 {
        struct trie *t = (struct trie *) tb->tb_data;
-       u32 key, mask;
-       int plen = cfg->fc_dst_len;
-       u8 tos = cfg->fc_tos;
        struct fib_alias *fa, *fa_to_delete;
-       struct list_head *fa_head;
-       struct tnode *l;
-       struct leaf_info *li;
+       struct key_vector *l, *tp;
+       u8 plen = cfg->fc_dst_len;
+       u8 slen = KEYLENGTH - plen;
+       u8 tos = cfg->fc_tos;
+       u32 key;
 
-       if (plen > 32)
+       if (plen > KEYLENGTH)
                return -EINVAL;
 
        key = ntohl(cfg->fc_dst);
-       mask = ntohl(inet_make_mask(plen));
 
-       if (key & ~mask)
+       if ((plen < KEYLENGTH) && (key << plen))
                return -EINVAL;
 
-       key = key & mask;
-       l = fib_find_node(t, key);
-
+       l = fib_find_node(t, &tp, key);
        if (!l)
                return -ESRCH;
 
-       li = find_leaf_info(l, plen);
-
-       if (!li)
-               return -ESRCH;
-
-       fa_head = &li->falh;
-       fa = fib_find_alias(fa_head, tos, 0);
-
+       fa = fib_find_alias(&l->leaf, slen, tos, 0, tb->tb_id);
        if (!fa)
                return -ESRCH;
 
        pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
 
        fa_to_delete = NULL;
-       fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
-       list_for_each_entry_continue(fa, fa_head, fa_list) {
+       hlist_for_each_entry_from(fa, fa_list) {
                struct fib_info *fi = fa->fa_info;
 
-               if (fa->fa_tos != tos)
+               if ((fa->fa_slen != slen) ||
+                   (fa->tb_id != tb->tb_id) ||
+                   (fa->fa_tos != tos))
                        break;
 
                if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
@@ -1557,240 +1517,397 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
        if (!fa_to_delete)
                return -ESRCH;
 
-       fa = fa_to_delete;
-       rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
-                 &cfg->fc_nlinfo, 0);
+       netdev_switch_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos,
+                                  cfg->fc_type, tb->tb_id);
 
-       list_del_rcu(&fa->fa_list);
+       rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
+                 &cfg->fc_nlinfo, 0);
 
        if (!plen)
                tb->tb_num_default--;
 
-       if (list_empty(fa_head)) {
-               remove_leaf_info(l, li);
-               free_leaf_info(li);
-       }
+       fib_remove_alias(t, tp, l, fa_to_delete);
 
-       if (hlist_empty(&l->list))
-               trie_leaf_remove(t, l);
-
-       if (fa->fa_state & FA_S_ACCESSED)
+       if (fa_to_delete->fa_state & FA_S_ACCESSED)
                rt_cache_flush(cfg->fc_nlinfo.nl_net);
 
-       fib_release_info(fa->fa_info);
-       alias_free_mem_rcu(fa);
+       fib_release_info(fa_to_delete->fa_info);
+       alias_free_mem_rcu(fa_to_delete);
        return 0;
 }
 
-static int trie_flush_list(struct list_head *head)
+/* Scan for the next leaf starting at the provided key value */
+static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
 {
-       struct fib_alias *fa, *fa_node;
-       int found = 0;
+       struct key_vector *pn, *n = *tn;
+       unsigned long cindex;
 
-       list_for_each_entry_safe(fa, fa_node, head, fa_list) {
-               struct fib_info *fi = fa->fa_info;
+       /* this loop is meant to try and find the key in the trie */
+       do {
+               /* record parent and next child index */
+               pn = n;
+               cindex = key ? get_index(key, pn) : 0;
 
-               if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
-                       list_del_rcu(&fa->fa_list);
-                       fib_release_info(fa->fa_info);
-                       alias_free_mem_rcu(fa);
-                       found++;
+               if (cindex >> pn->bits)
+                       break;
+
+               /* descend into the next child */
+               n = get_child_rcu(pn, cindex++);
+               if (!n)
+                       break;
+
+               /* guarantee forward progress on the keys */
+               if (IS_LEAF(n) && (n->key >= key))
+                       goto found;
+       } while (IS_TNODE(n));
+
+       /* this loop will search for the next leaf with a greater key */
+       while (!IS_TRIE(pn)) {
+               /* if we exhausted the parent node we will need to climb */
+               if (cindex >= (1ul << pn->bits)) {
+                       t_key pkey = pn->key;
+
+                       pn = node_parent_rcu(pn);
+                       cindex = get_index(pkey, pn) + 1;
+                       continue;
                }
+
+               /* grab the next available node */
+               n = get_child_rcu(pn, cindex++);
+               if (!n)
+                       continue;
+
+               /* no need to compare keys since we bumped the index */
+               if (IS_LEAF(n))
+                       goto found;
+
+               /* Rescan start scanning in new node */
+               pn = n;
+               cindex = 0;
        }
-       return found;
+
+       *tn = pn;
+       return NULL; /* Root of trie */
+found:
+       /* if we are at the limit for keys just return NULL for the tnode */
+       *tn = pn;
+       return n;
 }
 
-static int trie_flush_leaf(struct tnode *l)
+static void fib_trie_free(struct fib_table *tb)
 {
-       int found = 0;
-       struct hlist_head *lih = &l->list;
+       struct trie *t = (struct trie *)tb->tb_data;
+       struct key_vector *pn = t->kv;
+       unsigned long cindex = 1;
        struct hlist_node *tmp;
-       struct leaf_info *li = NULL;
-       unsigned char plen = KEYLENGTH;
+       struct fib_alias *fa;
+
+       /* walk trie in reverse order and free everything */
+       for (;;) {
+               struct key_vector *n;
+
+               if (!(cindex--)) {
+                       t_key pkey = pn->key;
+
+                       if (IS_TRIE(pn))
+                               break;
+
+                       n = pn;
+                       pn = node_parent(pn);
 
-       hlist_for_each_entry_safe(li, tmp, lih, hlist) {
-               found += trie_flush_list(&li->falh);
+                       /* drop emptied tnode */
+                       put_child_root(pn, n->key, NULL);
+                       node_free(n);
+
+                       cindex = get_index(pkey, pn);
 
-               if (list_empty(&li->falh)) {
-                       hlist_del_rcu(&li->hlist);
-                       free_leaf_info(li);
                        continue;
                }
 
-               plen = li->plen;
-       }
+               /* grab the next available node */
+               n = get_child(pn, cindex);
+               if (!n)
+                       continue;
 
-       l->slen = KEYLENGTH - plen;
+               if (IS_TNODE(n)) {
+                       /* record pn and cindex for leaf walking */
+                       pn = n;
+                       cindex = 1ul << n->bits;
 
-       return found;
+                       continue;
+               }
+
+               hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+                       hlist_del_rcu(&fa->fa_list);
+                       alias_free_mem_rcu(fa);
+               }
+
+               put_child_root(pn, n->key, NULL);
+               node_free(n);
+       }
+
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+       free_percpu(t->stats);
+#endif
+       kfree(tb);
 }
 
-/*
- * Scan for the next right leaf starting at node p->child[idx]
- * Since we have back pointer, no recursion necessary.
- */
-static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
+struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
 {
-       do {
-               unsigned long idx = c ? idx = get_index(c->key, p) + 1 : 0;
+       struct trie *ot = (struct trie *)oldtb->tb_data;
+       struct key_vector *l, *tp = ot->kv;
+       struct fib_table *local_tb;
+       struct fib_alias *fa;
+       struct trie *lt;
+       t_key key = 0;
 
-               while (idx < tnode_child_length(p)) {
-                       c = tnode_get_child_rcu(p, idx++);
-                       if (!c)
+       if (oldtb->tb_data == oldtb->__data)
+               return oldtb;
+
+       local_tb = fib_trie_table(RT_TABLE_LOCAL, NULL);
+       if (!local_tb)
+               return NULL;
+
+       lt = (struct trie *)local_tb->tb_data;
+
+       while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
+               struct key_vector *local_l = NULL, *local_tp;
+
+               hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+                       struct fib_alias *new_fa;
+
+                       if (local_tb->tb_id != fa->tb_id)
                                continue;
 
-                       if (IS_LEAF(c))
-                               return c;
+                       /* clone fa for new local table */
+                       new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
+                       if (!new_fa)
+                               goto out;
+
+                       memcpy(new_fa, fa, sizeof(*fa));
 
-                       /* Rescan start scanning in new node */
-                       p = c;
-                       idx = 0;
+                       /* insert clone into table */
+                       if (!local_l)
+                               local_l = fib_find_node(lt, &local_tp, l->key);
+
+                       if (fib_insert_alias(lt, local_tp, local_l, new_fa,
+                                            NULL, l->key))
+                               goto out;
                }
 
-               /* Node empty, walk back up to parent */
-               c = p;
-       } while ((p = node_parent_rcu(c)) != NULL);
+               /* stop loop if key wrapped back to 0 */
+               key = l->key + 1;
+               if (key < l->key)
+                       break;
+       }
 
-       return NULL; /* Root of trie */
+       return local_tb;
+out:
+       fib_trie_free(local_tb);
+
+       return NULL;
 }
 
-static struct tnode *trie_firstleaf(struct trie *t)
+/* Caller must hold RTNL */
+void fib_table_flush_external(struct fib_table *tb)
 {
-       struct tnode *n = rcu_dereference_rtnl(t->trie);
+       struct trie *t = (struct trie *)tb->tb_data;
+       struct key_vector *pn = t->kv;
+       unsigned long cindex = 1;
+       struct hlist_node *tmp;
+       struct fib_alias *fa;
 
-       if (!n)
-               return NULL;
+       /* walk trie in reverse order */
+       for (;;) {
+               unsigned char slen = 0;
+               struct key_vector *n;
 
-       if (IS_LEAF(n))          /* trie is just a leaf */
-               return n;
+               if (!(cindex--)) {
+                       t_key pkey = pn->key;
 
-       return leaf_walk_rcu(n, NULL);
-}
+                       /* cannot resize the trie vector */
+                       if (IS_TRIE(pn))
+                               break;
 
-static struct tnode *trie_nextleaf(struct tnode *l)
-{
-       struct tnode *p = node_parent_rcu(l);
+                       /* resize completed node */
+                       pn = resize(t, pn);
+                       cindex = get_index(pkey, pn);
 
-       if (!p)
-               return NULL;    /* trie with just one leaf */
+                       continue;
+               }
 
-       return leaf_walk_rcu(p, l);
-}
+               /* grab the next available node */
+               n = get_child(pn, cindex);
+               if (!n)
+                       continue;
 
-static struct tnode *trie_leafindex(struct trie *t, int index)
-{
-       struct tnode *l = trie_firstleaf(t);
+               if (IS_TNODE(n)) {
+                       /* record pn and cindex for leaf walking */
+                       pn = n;
+                       cindex = 1ul << n->bits;
 
-       while (l && index-- > 0)
-               l = trie_nextleaf(l);
+                       continue;
+               }
 
-       return l;
-}
+               hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+                       struct fib_info *fi = fa->fa_info;
+
+                       /* if alias was cloned to local then we just
+                        * need to remove the local copy from main
+                        */
+                       if (tb->tb_id != fa->tb_id) {
+                               hlist_del_rcu(&fa->fa_list);
+                               alias_free_mem_rcu(fa);
+                               continue;
+                       }
 
+                       /* record local slen */
+                       slen = fa->fa_slen;
 
-/*
- * Caller must hold RTNL.
- */
+                       if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL))
+                               continue;
+
+                       netdev_switch_fib_ipv4_del(n->key,
+                                                  KEYLENGTH - fa->fa_slen,
+                                                  fi, fa->fa_tos,
+                                                  fa->fa_type, tb->tb_id);
+               }
+
+               /* update leaf slen */
+               n->slen = slen;
+
+               if (hlist_empty(&n->leaf)) {
+                       put_child_root(pn, n->key, NULL);
+                       node_free(n);
+               } else {
+                       leaf_pull_suffix(pn, n);
+               }
+       }
+}
+
+/* Caller must hold RTNL. */
 int fib_table_flush(struct fib_table *tb)
 {
-       struct trie *t = (struct trie *) tb->tb_data;
-       struct tnode *l, *ll = NULL;
+       struct trie *t = (struct trie *)tb->tb_data;
+       struct key_vector *pn = t->kv;
+       unsigned long cindex = 1;
+       struct hlist_node *tmp;
+       struct fib_alias *fa;
        int found = 0;
 
-       for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
-               found += trie_flush_leaf(l);
+       /* walk trie in reverse order */
+       for (;;) {
+               unsigned char slen = 0;
+               struct key_vector *n;
+
+               if (!(cindex--)) {
+                       t_key pkey = pn->key;
 
-               if (ll) {
-                       if (hlist_empty(&ll->list))
-                               trie_leaf_remove(t, ll);
-                       else
-                               leaf_pull_suffix(ll);
+                       /* cannot resize the trie vector */
+                       if (IS_TRIE(pn))
+                               break;
+
+                       /* resize completed node */
+                       pn = resize(t, pn);
+                       cindex = get_index(pkey, pn);
+
+                       continue;
                }
 
-               ll = l;
-       }
+               /* grab the next available node */
+               n = get_child(pn, cindex);
+               if (!n)
+                       continue;
 
-       if (ll) {
-               if (hlist_empty(&ll->list))
-                       trie_leaf_remove(t, ll);
-               else
-                       leaf_pull_suffix(ll);
+               if (IS_TNODE(n)) {
+                       /* record pn and cindex for leaf walking */
+                       pn = n;
+                       cindex = 1ul << n->bits;
+
+                       continue;
+               }
+
+               hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+                       struct fib_info *fi = fa->fa_info;
+
+                       if (!fi || !(fi->fib_flags & RTNH_F_DEAD)) {
+                               slen = fa->fa_slen;
+                               continue;
+                       }
+
+                       netdev_switch_fib_ipv4_del(n->key,
+                                                  KEYLENGTH - fa->fa_slen,
+                                                  fi, fa->fa_tos,
+                                                  fa->fa_type, tb->tb_id);
+                       hlist_del_rcu(&fa->fa_list);
+                       fib_release_info(fa->fa_info);
+                       alias_free_mem_rcu(fa);
+                       found++;
+               }
+
+               /* update leaf slen */
+               n->slen = slen;
+
+               if (hlist_empty(&n->leaf)) {
+                       put_child_root(pn, n->key, NULL);
+                       node_free(n);
+               } else {
+                       leaf_pull_suffix(pn, n);
+               }
        }
 
        pr_debug("trie_flush found=%d\n", found);
        return found;
 }
 
-void fib_free_table(struct fib_table *tb)
+static void __trie_free_rcu(struct rcu_head *head)
 {
+       struct fib_table *tb = container_of(head, struct fib_table, rcu);
 #ifdef CONFIG_IP_FIB_TRIE_STATS
        struct trie *t = (struct trie *)tb->tb_data;
 
-       free_percpu(t->stats);
+       if (tb->tb_data == tb->__data)
+               free_percpu(t->stats);
 #endif /* CONFIG_IP_FIB_TRIE_STATS */
        kfree(tb);
 }
 
-static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
-                          struct fib_table *tb,
-                          struct sk_buff *skb, struct netlink_callback *cb)
+void fib_free_table(struct fib_table *tb)
 {
-       int i, s_i;
+       call_rcu(&tb->rcu, __trie_free_rcu);
+}
+
+static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
+                            struct sk_buff *skb, struct netlink_callback *cb)
+{
+       __be32 xkey = htonl(l->key);
        struct fib_alias *fa;
-       __be32 xkey = htonl(key);
+       int i, s_i;
 
-       s_i = cb->args[5];
+       s_i = cb->args[4];
        i = 0;
 
        /* rcu_read_lock is hold by caller */
-
-       list_for_each_entry_rcu(fa, fah, fa_list) {
+       hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
                if (i < s_i) {
                        i++;
                        continue;
                }
 
+               if (tb->tb_id != fa->tb_id) {
+                       i++;
+                       continue;
+               }
+
                if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq,
                                  RTM_NEWROUTE,
                                  tb->tb_id,
                                  fa->fa_type,
                                  xkey,
-                                 plen,
+                                 KEYLENGTH - fa->fa_slen,
                                  fa->fa_tos,
                                  fa->fa_info, NLM_F_MULTI) < 0) {
-                       cb->args[5] = i;
-                       return -1;
-               }
-               i++;
-       }
-       cb->args[5] = i;
-       return skb->len;
-}
-
-static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
-                       struct sk_buff *skb, struct netlink_callback *cb)
-{
-       struct leaf_info *li;
-       int i, s_i;
-
-       s_i = cb->args[4];
-       i = 0;
-
-       /* rcu_read_lock is hold by caller */
-       hlist_for_each_entry_rcu(li, &l->list, hlist) {
-               if (i < s_i) {
-                       i++;
-                       continue;
-               }
-
-               if (i > s_i)
-                       cb->args[5] = 0;
-
-               if (list_empty(&li->falh))
-                       continue;
-
-               if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
                        cb->args[4] = i;
                        return -1;
                }
@@ -1801,44 +1918,38 @@ static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
        return skb->len;
 }
 
+/* rcu_read_lock needs to be hold by caller from readside */
 int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
                   struct netlink_callback *cb)
 {
-       struct tnode *l;
-       struct trie *t = (struct trie *) tb->tb_data;
-       t_key key = cb->args[2];
-       int count = cb->args[3];
-
-       rcu_read_lock();
+       struct trie *t = (struct trie *)tb->tb_data;
+       struct key_vector *l, *tp = t->kv;
        /* Dump starting at last key.
         * Note: 0.0.0.0/0 (ie default) is first key.
         */
-       if (count == 0)
-               l = trie_firstleaf(t);
-       else {
-               /* Normally, continue from last key, but if that is missing
-                * fallback to using slow rescan
-                */
-               l = fib_find_node(t, key);
-               if (!l)
-                       l = trie_leafindex(t, count);
-       }
+       int count = cb->args[2];
+       t_key key = cb->args[3];
 
-       while (l) {
-               cb->args[2] = l->key;
+       while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
                if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
-                       cb->args[3] = count;
-                       rcu_read_unlock();
+                       cb->args[3] = key;
+                       cb->args[2] = count;
                        return -1;
                }
 
                ++count;
-               l = trie_nextleaf(l);
+               key = l->key + 1;
+
                memset(&cb->args[4], 0,
                       sizeof(cb->args) - 4*sizeof(cb->args[0]));
+
+               /* stop loop if key wrapped back to 0 */
+               if (key < l->key)
+                       break;
        }
-       cb->args[3] = count;
-       rcu_read_unlock();
+
+       cb->args[3] = key;
+       cb->args[2] = count;
 
        return skb->len;
 }
@@ -1850,28 +1961,34 @@ void __init fib_trie_init(void)
                                          0, SLAB_PANIC, NULL);
 
        trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
-                                          max(sizeof(struct tnode),
-                                              sizeof(struct leaf_info)),
+                                          LEAF_SIZE,
                                           0, SLAB_PANIC, NULL);
 }
 
-
-struct fib_table *fib_trie_table(u32 id)
+struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
 {
        struct fib_table *tb;
        struct trie *t;
+       size_t sz = sizeof(*tb);
+
+       if (!alias)
+               sz += sizeof(struct trie);
 
-       tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
-                    GFP_KERNEL);
-       if (tb == NULL)
+       tb = kzalloc(sz, GFP_KERNEL);
+       if (!tb)
                return NULL;
 
        tb->tb_id = id;
        tb->tb_default = -1;
        tb->tb_num_default = 0;
+       tb->tb_data = (alias ? alias->__data : tb->__data);
+
+       if (alias)
+               return tb;
 
        t = (struct trie *) tb->tb_data;
-       RCU_INIT_POINTER(t->trie, NULL);
+       t->kv[0].pos = KEYLENGTH;
+       t->kv[0].slen = KEYLENGTH;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
        t->stats = alloc_percpu(struct trie_use_stats);
        if (!t->stats) {
@@ -1888,65 +2005,63 @@ struct fib_table *fib_trie_table(u32 id)
 struct fib_trie_iter {
        struct seq_net_private p;
        struct fib_table *tb;
-       struct tnode *tnode;
+       struct key_vector *tnode;
        unsigned int index;
        unsigned int depth;
 };
 
-static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
+static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter)
 {
        unsigned long cindex = iter->index;
-       struct tnode *tn = iter->tnode;
-       struct tnode *p;
-
-       /* A single entry routing table */
-       if (!tn)
-               return NULL;
+       struct key_vector *pn = iter->tnode;
+       t_key pkey;
 
        pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
                 iter->tnode, iter->index, iter->depth);
-rescan:
-       while (cindex < tnode_child_length(tn)) {
-               struct tnode *n = tnode_get_child_rcu(tn, cindex);
 
-               if (n) {
+       while (!IS_TRIE(pn)) {
+               while (cindex < child_length(pn)) {
+                       struct key_vector *n = get_child_rcu(pn, cindex++);
+
+                       if (!n)
+                               continue;
+
                        if (IS_LEAF(n)) {
-                               iter->tnode = tn;
-                               iter->index = cindex + 1;
+                               iter->tnode = pn;
+                               iter->index = cindex;
                        } else {
                                /* push down one level */
                                iter->tnode = n;
                                iter->index = 0;
                                ++iter->depth;
                        }
+
                        return n;
                }
 
-               ++cindex;
-       }
-
-       /* Current node exhausted, pop back up */
-       p = node_parent_rcu(tn);
-       if (p) {
-               cindex = get_index(tn->key, p) + 1;
-               tn = p;
+               /* Current node exhausted, pop back up */
+               pkey = pn->key;
+               pn = node_parent_rcu(pn);
+               cindex = get_index(pkey, pn) + 1;
                --iter->depth;
-               goto rescan;
        }
 
-       /* got root? */
+       /* record root node so further searches know we are done */
+       iter->tnode = pn;
+       iter->index = 0;
+
        return NULL;
 }
 
-static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
-                                      struct trie *t)
+static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
+                                            struct trie *t)
 {
-       struct tnode *n;
+       struct key_vector *n, *pn = t->kv;
 
        if (!t)
                return NULL;
 
-       n = rcu_dereference(t->trie);
+       n = rcu_dereference(pn->tnode[0]);
        if (!n)
                return NULL;
 
@@ -1955,7 +2070,7 @@ static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
                iter->index = 0;
                iter->depth = 1;
        } else {
-               iter->tnode = NULL;
+               iter->tnode = pn;
                iter->index = 0;
                iter->depth = 0;
        }
@@ -1965,7 +2080,7 @@ static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
 
 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
 {
-       struct tnode *n;
+       struct key_vector *n;
        struct fib_trie_iter iter;
 
        memset(s, 0, sizeof(*s));
@@ -1973,20 +2088,20 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
        rcu_read_lock();
        for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
                if (IS_LEAF(n)) {
-                       struct leaf_info *li;
+                       struct fib_alias *fa;
 
                        s->leaves++;
                        s->totdepth += iter.depth;
                        if (iter.depth > s->maxdepth)
                                s->maxdepth = iter.depth;
 
-                       hlist_for_each_entry_rcu(li, &n->list, hlist)
+                       hlist_for_each_entry_rcu(fa, &n->leaf, fa_list)
                                ++s->prefixes;
                } else {
                        s->tnodes++;
                        if (n->bits < MAX_STAT_DEPTH)
                                s->nodesizes[n->bits]++;
-                       s->nullpointers += n->empty_children;
+                       s->nullpointers += tn_info(n)->empty_children;
                }
        }
        rcu_read_unlock();
@@ -2009,13 +2124,13 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
        seq_printf(seq, "\tMax depth:      %u\n", stat->maxdepth);
 
        seq_printf(seq, "\tLeaves:         %u\n", stat->leaves);
-       bytes = sizeof(struct tnode) * stat->leaves;
+       bytes = LEAF_SIZE * stat->leaves;
 
        seq_printf(seq, "\tPrefixes:       %u\n", stat->prefixes);
-       bytes += sizeof(struct leaf_info) * stat->prefixes;
+       bytes += sizeof(struct fib_alias) * stat->prefixes;
 
        seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
-       bytes += sizeof(struct tnode) * stat->tnodes;
+       bytes += TNODE_SIZE(0) * stat->tnodes;
 
        max = MAX_STAT_DEPTH;
        while (max > 0 && stat->nodesizes[max-1] == 0)
@@ -2030,7 +2145,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
        seq_putc(seq, '\n');
        seq_printf(seq, "\tPointers: %u\n", pointers);
 
-       bytes += sizeof(struct tnode *) * pointers;
+       bytes += sizeof(struct key_vector *) * pointers;
        seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
        seq_printf(seq, "Total size: %u  kB\n", (bytes + 1023) / 1024);
 }
@@ -2084,7 +2199,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
        seq_printf(seq,
                   "Basic info: size of leaf:"
                   " %Zd bytes, size of tnode: %Zd bytes.\n",
-                  sizeof(struct tnode), sizeof(struct tnode));
+                  LEAF_SIZE, TNODE_SIZE(0));
 
        for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
                struct hlist_head *head = &net->ipv4.fib_table_hash[h];
@@ -2123,7 +2238,7 @@ static const struct file_operations fib_triestat_fops = {
        .release = single_release_net,
 };
 
-static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
+static struct key_vector *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
 {
        struct fib_trie_iter *iter = seq->private;
        struct net *net = seq_file_net(seq);
@@ -2135,7 +2250,7 @@ static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
                struct fib_table *tb;
 
                hlist_for_each_entry_rcu(tb, head, tb_hlist) {
-                       struct tnode *n;
+                       struct key_vector *n;
 
                        for (n = fib_trie_get_first(iter,
                                                    (struct trie *) tb->tb_data);
@@ -2164,7 +2279,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        struct fib_table *tb = iter->tb;
        struct hlist_node *tb_node;
        unsigned int h;
-       struct tnode *n;
+       struct key_vector *n;
 
        ++*pos;
        /* next node in same table */
@@ -2250,9 +2365,9 @@ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
 static int fib_trie_seq_show(struct seq_file *seq, void *v)
 {
        const struct fib_trie_iter *iter = seq->private;
-       struct tnode *n = v;
+       struct key_vector *n = v;
 
-       if (!node_parent_rcu(n))
+       if (IS_TRIE(node_parent_rcu(n)))
                fib_table_print(seq, iter->tb);
 
        if (IS_TNODE(n)) {
@@ -2261,30 +2376,28 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
                seq_indent(seq, iter->depth-1);
                seq_printf(seq, "  +-- %pI4/%zu %u %u %u\n",
                           &prf, KEYLENGTH - n->pos - n->bits, n->bits,
-                          n->full_children, n->empty_children);
+                          tn_info(n)->full_children,
+                          tn_info(n)->empty_children);
        } else {
-               struct leaf_info *li;
                __be32 val = htonl(n->key);
+               struct fib_alias *fa;
 
                seq_indent(seq, iter->depth);
                seq_printf(seq, "  |-- %pI4\n", &val);
 
-               hlist_for_each_entry_rcu(li, &n->list, hlist) {
-                       struct fib_alias *fa;
-
-                       list_for_each_entry_rcu(fa, &li->falh, fa_list) {
-                               char buf1[32], buf2[32];
-
-                               seq_indent(seq, iter->depth+1);
-                               seq_printf(seq, "  /%d %s %s", li->plen,
-                                          rtn_scope(buf1, sizeof(buf1),
-                                                    fa->fa_info->fib_scope),
-                                          rtn_type(buf2, sizeof(buf2),
-                                                   fa->fa_type));
-                               if (fa->fa_tos)
-                                       seq_printf(seq, " tos=%d", fa->fa_tos);
-                               seq_putc(seq, '\n');
-                       }
+               hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
+                       char buf1[32], buf2[32];
+
+                       seq_indent(seq, iter->depth + 1);
+                       seq_printf(seq, "  /%zu %s %s",
+                                  KEYLENGTH - fa->fa_slen,
+                                  rtn_scope(buf1, sizeof(buf1),
+                                            fa->fa_info->fib_scope),
+                                  rtn_type(buf2, sizeof(buf2),
+                                           fa->fa_type));
+                       if (fa->fa_tos)
+                               seq_printf(seq, " tos=%d", fa->fa_tos);
+                       seq_putc(seq, '\n');
                }
        }
 
@@ -2314,31 +2427,47 @@ static const struct file_operations fib_trie_fops = {
 
 struct fib_route_iter {
        struct seq_net_private p;
-       struct trie *main_trie;
+       struct fib_table *main_tb;
+       struct key_vector *tnode;
        loff_t  pos;
        t_key   key;
 };
 
-static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
+static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
+                                           loff_t pos)
 {
-       struct tnode *l = NULL;
-       struct trie *t = iter->main_trie;
+       struct fib_table *tb = iter->main_tb;
+       struct key_vector *l, **tp = &iter->tnode;
+       struct trie *t;
+       t_key key;
 
-       /* use cache location of last found key */
-       if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
+       /* use cache location of next-to-find key */
+       if (iter->pos > 0 && pos >= iter->pos) {
                pos -= iter->pos;
-       else {
+               key = iter->key;
+       } else {
+               t = (struct trie *)tb->tb_data;
+               iter->tnode = t->kv;
                iter->pos = 0;
-               l = trie_firstleaf(t);
+               key = 0;
        }
 
-       while (l && pos-- > 0) {
+       while ((l = leaf_walk_rcu(tp, key)) != NULL) {
+               key = l->key + 1;
                iter->pos++;
-               l = trie_nextleaf(l);
+
+               if (pos-- <= 0)
+                       break;
+
+               l = NULL;
+
+               /* handle unlikely case of a key wrap */
+               if (!key)
+                       break;
        }
 
        if (l)
-               iter->key = pos;        /* remember it */
+               iter->key = key;        /* remember it */
        else
                iter->pos = 0;          /* forget it */
 
@@ -2350,37 +2479,46 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
 {
        struct fib_route_iter *iter = seq->private;
        struct fib_table *tb;
+       struct trie *t;
 
        rcu_read_lock();
+
        tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
        if (!tb)
                return NULL;
 
-       iter->main_trie = (struct trie *) tb->tb_data;
-       if (*pos == 0)
-               return SEQ_START_TOKEN;
-       else
-               return fib_route_get_idx(iter, *pos - 1);
+       iter->main_tb = tb;
+
+       if (*pos != 0)
+               return fib_route_get_idx(iter, *pos);
+
+       t = (struct trie *)tb->tb_data;
+       iter->tnode = t->kv;
+       iter->pos = 0;
+       iter->key = 0;
+
+       return SEQ_START_TOKEN;
 }
 
 static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct fib_route_iter *iter = seq->private;
-       struct tnode *l = v;
+       struct key_vector *l = NULL;
+       t_key key = iter->key;
 
        ++*pos;
-       if (v == SEQ_START_TOKEN) {
-               iter->pos = 0;
-               l = trie_firstleaf(iter->main_trie);
-       } else {
+
+       /* only allow key of 0 for start of sequence */
+       if ((v == SEQ_START_TOKEN) || key)
+               l = leaf_walk_rcu(&iter->tnode, key);
+
+       if (l) {
+               iter->key = l->key + 1;
                iter->pos++;
-               l = trie_nextleaf(l);
+       } else {
+               iter->pos = 0;
        }
 
-       if (l)
-               iter->key = l->key;
-       else
-               iter->pos = 0;
        return l;
 }
 
@@ -2412,8 +2550,11 @@ static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info
  */
 static int fib_route_seq_show(struct seq_file *seq, void *v)
 {
-       struct tnode *l = v;
-       struct leaf_info *li;
+       struct fib_route_iter *iter = seq->private;
+       struct fib_table *tb = iter->main_tb;
+       struct fib_alias *fa;
+       struct key_vector *l = v;
+       __be32 prefix;
 
        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
@@ -2422,45 +2563,43 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
                return 0;
        }
 
-       hlist_for_each_entry_rcu(li, &l->list, hlist) {
-               struct fib_alias *fa;
-               __be32 mask, prefix;
+       prefix = htonl(l->key);
 
-               mask = inet_make_mask(li->plen);
-               prefix = htonl(l->key);
+       hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+               const struct fib_info *fi = fa->fa_info;
+               __be32 mask = inet_make_mask(KEYLENGTH - fa->fa_slen);
+               unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
 
-               list_for_each_entry_rcu(fa, &li->falh, fa_list) {
-                       const struct fib_info *fi = fa->fa_info;
-                       unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
+               if ((fa->fa_type == RTN_BROADCAST) ||
+                   (fa->fa_type == RTN_MULTICAST))
+                       continue;
 
-                       if (fa->fa_type == RTN_BROADCAST
-                           || fa->fa_type == RTN_MULTICAST)
-                               continue;
+               if (fa->tb_id != tb->tb_id)
+                       continue;
 
-                       seq_setwidth(seq, 127);
-
-                       if (fi)
-                               seq_printf(seq,
-                                        "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
-                                        "%d\t%08X\t%d\t%u\t%u",
-                                        fi->fib_dev ? fi->fib_dev->name : "*",
-                                        prefix,
-                                        fi->fib_nh->nh_gw, flags, 0, 0,
-                                        fi->fib_priority,
-                                        mask,
-                                        (fi->fib_advmss ?
-                                         fi->fib_advmss + 40 : 0),
-                                        fi->fib_window,
-                                        fi->fib_rtt >> 3);
-                       else
-                               seq_printf(seq,
-                                        "*\t%08X\t%08X\t%04X\t%d\t%u\t"
-                                        "%d\t%08X\t%d\t%u\t%u",
-                                        prefix, 0, flags, 0, 0, 0,
-                                        mask, 0, 0, 0);
-
-                       seq_pad(seq, '\n');
-               }
+               seq_setwidth(seq, 127);
+
+               if (fi)
+                       seq_printf(seq,
+                                  "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
+                                  "%d\t%08X\t%d\t%u\t%u",
+                                  fi->fib_dev ? fi->fib_dev->name : "*",
+                                  prefix,
+                                  fi->fib_nh->nh_gw, flags, 0, 0,
+                                  fi->fib_priority,
+                                  mask,
+                                  (fi->fib_advmss ?
+                                   fi->fib_advmss + 40 : 0),
+                                  fi->fib_window,
+                                  fi->fib_rtt >> 3);
+               else
+                       seq_printf(seq,
+                                  "*\t%08X\t%08X\t%04X\t%d\t%u\t"
+                                  "%d\t%08X\t%d\t%u\t%u",
+                                  prefix, 0, flags, 0, 0, 0,
+                                  mask, 0, 0, 0);
+
+               seq_pad(seq, '\n');
        }
 
        return 0;
index 5a4828ba05ad7997998e400d10bc8b1dfbe99db0..e64f8e9785d184cd033d23682691b397adc65f1e 100644 (file)
@@ -196,7 +196,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 
        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
-       if (ptype == NULL) {
+       if (!ptype) {
                flush = 1;
                goto out_unlock;
        }
@@ -230,7 +230,7 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
 
        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
-       if (ptype != NULL)
+       if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
 
        rcu_read_unlock();
index 51973ddc05a68463f8f39e779491374f12e5191a..5aa46d4b44efb99702ccd89005528f20ae422a0e 100644 (file)
@@ -149,7 +149,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 
        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
-       if (ptype == NULL)
+       if (!ptype)
                goto out_unlock;
 
        grehlen = GRE_HEADER_SECTION;
@@ -243,7 +243,7 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
 
        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
-       if (ptype != NULL)
+       if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
 
        rcu_read_unlock();
index 5e564014a0b75d04a8f64d48c6d3a14fe6df18a1..f5203fba623638d94b03435db86ac4ed696adba8 100644 (file)
@@ -399,7 +399,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
                return;
 
        sk = icmp_xmit_lock(net);
-       if (sk == NULL)
+       if (!sk)
                return;
        inet = inet_sk(sk);
 
@@ -609,7 +609,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                                                 skb_in->data,
                                                 sizeof(_inner_type),
                                                 &_inner_type);
-                       if (itp == NULL)
+                       if (!itp)
                                goto out;
 
                        /*
@@ -627,7 +627,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                return;
 
        sk = icmp_xmit_lock(net);
-       if (sk == NULL)
+       if (!sk)
                goto out_free;
 
        /*
index 666cf364df86dffbab3cd2b7b1ea66e72d3ac7a8..a3a697f5ffbaba1b30db8341ea9b51b229ac29df 100644 (file)
@@ -97,6 +97,7 @@
 #include <net/route.h>
 #include <net/sock.h>
 #include <net/checksum.h>
+#include <net/inet_common.h>
 #include <linux/netfilter_ipv4.h>
 #ifdef CONFIG_IP_MROUTE
 #include <linux/mroute.h>
@@ -369,7 +370,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
        pip->saddr    = fl4.saddr;
        pip->protocol = IPPROTO_IGMP;
        pip->tot_len  = 0;      /* filled in later */
-       ip_select_ident(skb, NULL);
+       ip_select_ident(net, skb, NULL);
        ((u8 *)&pip[1])[0] = IPOPT_RA;
        ((u8 *)&pip[1])[1] = 4;
        ((u8 *)&pip[1])[2] = 0;
@@ -691,7 +692,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
-       if (skb == NULL) {
+       if (!skb) {
                ip_rt_put(rt);
                return -1;
        }
@@ -713,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        iph->daddr    = dst;
        iph->saddr    = fl4.saddr;
        iph->protocol = IPPROTO_IGMP;
-       ip_select_ident(skb, NULL);
+       ip_select_ident(net, skb, NULL);
        ((u8 *)&iph[1])[0] = IPOPT_RA;
        ((u8 *)&iph[1])[1] = 4;
        ((u8 *)&iph[1])[2] = 0;
@@ -980,7 +981,7 @@ int igmp_rcv(struct sk_buff *skb)
        int len = skb->len;
        bool dropped = true;
 
-       if (in_dev == NULL)
+       if (!in_dev)
                goto drop;
 
        if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
@@ -1849,30 +1850,28 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
        pmc->sfcount[MCAST_EXCLUDE] = 1;
 }
 
-
-/*
- * Join a multicast group
+/* Join a multicast group
  */
-int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
+
+int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
 {
-       int err;
        __be32 addr = imr->imr_multiaddr.s_addr;
-       struct ip_mc_socklist *iml = NULL, *i;
+       struct ip_mc_socklist *iml, *i;
        struct in_device *in_dev;
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        int ifindex;
        int count = 0;
+       int err;
+
+       ASSERT_RTNL();
 
        if (!ipv4_is_multicast(addr))
                return -EINVAL;
 
-       rtnl_lock();
-
        in_dev = ip_mc_find_dev(net, imr);
 
        if (!in_dev) {
-               iml = NULL;
                err = -ENODEV;
                goto done;
        }
@@ -1889,7 +1888,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
        if (count >= sysctl_igmp_max_memberships)
                goto done;
        iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
-       if (iml == NULL)
+       if (!iml)
                goto done;
 
        memcpy(&iml->multi, imr, sizeof(*imr));
@@ -1900,7 +1899,6 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
        ip_mc_inc_group(in_dev, addr);
        err = 0;
 done:
-       rtnl_unlock();
        return err;
 }
 EXPORT_SYMBOL(ip_mc_join_group);
@@ -1911,7 +1909,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
        struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
        int err;
 
-       if (psf == NULL) {
+       if (!psf) {
                /* any-source empty exclude case */
                return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
                        iml->sfmode, 0, NULL, 0);
@@ -1925,10 +1923,6 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
        return err;
 }
 
-/*
- *     Ask a socket to leave a group.
- */
-
 int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 {
        struct inet_sock *inet = inet_sk(sk);
@@ -1940,7 +1934,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
        u32 ifindex;
        int ret = -EADDRNOTAVAIL;
 
-       rtnl_lock();
+       ASSERT_RTNL();
+
        in_dev = ip_mc_find_dev(net, imr);
        if (!in_dev) {
                ret = -ENODEV;
@@ -1964,14 +1959,13 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
                *imlp = iml->next_rcu;
 
                ip_mc_dec_group(in_dev, group);
-               rtnl_unlock();
+
                /* decrease mem now to avoid the memleak warning */
                atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
                kfree_rcu(iml, rcu);
                return 0;
        }
 out:
-       rtnl_unlock();
        return ret;
 }
 EXPORT_SYMBOL(ip_mc_leave_group);
@@ -1993,7 +1987,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
        if (!ipv4_is_multicast(addr))
                return -EINVAL;
 
-       rtnl_lock();
+       ASSERT_RTNL();
 
        imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
        imr.imr_address.s_addr = mreqs->imr_interface;
@@ -2107,9 +2101,8 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
        ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
                &mreqs->imr_sourceaddr, 1);
 done:
-       rtnl_unlock();
        if (leavegroup)
-               return ip_mc_leave_group(sk, &imr);
+               err = ip_mc_leave_group(sk, &imr);
        return err;
 }
 
@@ -2131,7 +2124,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
            msf->imsf_fmode != MCAST_EXCLUDE)
                return -EINVAL;
 
-       rtnl_lock();
+       ASSERT_RTNL();
 
        imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
        imr.imr_address.s_addr = msf->imsf_interface;
@@ -2193,7 +2186,6 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
        pmc->sfmode = msf->imsf_fmode;
        err = 0;
 done:
-       rtnl_unlock();
        if (leavegroup)
                err = ip_mc_leave_group(sk, &imr);
        return err;
@@ -2368,7 +2360,7 @@ void ip_mc_drop_socket(struct sock *sk)
        struct ip_mc_socklist *iml;
        struct net *net = sock_net(sk);
 
-       if (inet->mc_list == NULL)
+       if (!inet->mc_list)
                return;
 
        rtnl_lock();
@@ -2378,7 +2370,7 @@ void ip_mc_drop_socket(struct sock *sk)
                inet->mc_list = iml->next_rcu;
                in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
                (void) ip_mc_leave_src(sk, iml, in_dev);
-               if (in_dev != NULL)
+               if (in_dev)
                        ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
                /* decrease mem now to avoid the memleak warning */
                atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
@@ -2595,13 +2587,13 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
        for_each_netdev_rcu(net, state->dev) {
                struct in_device *idev;
                idev = __in_dev_get_rcu(state->dev);
-               if (unlikely(idev == NULL))
+               if (unlikely(!idev))
                        continue;
                im = rcu_dereference(idev->mc_list);
-               if (likely(im != NULL)) {
+               if (likely(im)) {
                        spin_lock_bh(&im->lock);
                        psf = im->sources;
-                       if (likely(psf != NULL)) {
+                       if (likely(psf)) {
                                state->im = im;
                                state->idev = idev;
                                break;
@@ -2671,7 +2663,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
        __releases(rcu)
 {
        struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
-       if (likely(state->im != NULL)) {
+       if (likely(state->im)) {
                spin_unlock_bh(&state->im->lock);
                state->im = NULL;
        }
@@ -2724,6 +2716,7 @@ static const struct file_operations igmp_mcf_seq_fops = {
 static int __net_init igmp_net_init(struct net *net)
 {
        struct proc_dir_entry *pde;
+       int err;
 
        pde = proc_create("igmp", S_IRUGO, net->proc_net, &igmp_mc_seq_fops);
        if (!pde)
@@ -2732,8 +2725,18 @@ static int __net_init igmp_net_init(struct net *net)
                          &igmp_mcf_seq_fops);
        if (!pde)
                goto out_mcfilter;
+       err = inet_ctl_sock_create(&net->ipv4.mc_autojoin_sk, AF_INET,
+                                  SOCK_DGRAM, 0, net);
+       if (err < 0) {
+               pr_err("Failed to initialize the IGMP autojoin socket (err %d)\n",
+                      err);
+               goto out_sock;
+       }
+
        return 0;
 
+out_sock:
+       remove_proc_entry("mcfilter", net->proc_net);
 out_mcfilter:
        remove_proc_entry("igmp", net->proc_net);
 out_igmp:
@@ -2744,6 +2747,7 @@ static void __net_exit igmp_net_exit(struct net *net)
 {
        remove_proc_entry("mcfilter", net->proc_net);
        remove_proc_entry("igmp", net->proc_net);
+       inet_ctl_sock_destroy(net->ipv4.mc_autojoin_sk);
 }
 
 static struct pernet_operations igmp_net_ops = {
index 3e44b9b0b78ece392a1f1b0763b5445cadfb2557..5c3dd6267ed3557f2f139f83002fd7b1feaab237 100644 (file)
@@ -23,6 +23,7 @@
 #include <net/route.h>
 #include <net/tcp_states.h>
 #include <net/xfrm.h>
+#include <net/tcp.h>
 
 #ifdef INET_CSK_DEBUG
 const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
@@ -294,8 +295,8 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
-       struct sock *newsk;
        struct request_sock *req;
+       struct sock *newsk;
        int error;
 
        lock_sock(sk);
@@ -324,9 +325,11 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
        newsk = req->sk;
 
        sk_acceptq_removed(sk);
-       if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
+       if (sk->sk_protocol == IPPROTO_TCP &&
+           tcp_rsk(req)->tfo_listener &&
+           queue->fastopenq) {
                spin_lock_bh(&queue->fastopenq->lock);
-               if (tcp_rsk(req)->listener) {
+               if (tcp_rsk(req)->tfo_listener) {
                        /* We are still waiting for the final ACK from 3WHS
                         * so can't free req now. Instead, we set req->sk to
                         * NULL to signify that the child socket is taken
@@ -341,7 +344,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 out:
        release_sock(sk);
        if (req)
-               __reqsk_free(req);
+               reqsk_put(req);
        return newsk;
 out_err:
        newsk = NULL;
@@ -400,18 +403,17 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
                                     struct flowi4 *fl4,
                                     const struct request_sock *req)
 {
-       struct rtable *rt;
        const struct inet_request_sock *ireq = inet_rsk(req);
-       struct ip_options_rcu *opt = inet_rsk(req)->opt;
-       struct net *net = sock_net(sk);
-       int flags = inet_sk_flowi_flags(sk);
+       struct net *net = read_pnet(&ireq->ireq_net);
+       struct ip_options_rcu *opt = ireq->opt;
+       struct rtable *rt;
 
-       flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
+       flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
-                          sk->sk_protocol,
-                          flags,
+                          sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
-                          ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
+                          ireq->ir_loc_addr, ireq->ir_rmt_port,
+                          htons(ireq->ir_num));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
@@ -433,9 +435,9 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
                                            const struct request_sock *req)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
+       struct net *net = read_pnet(&ireq->ireq_net);
        struct inet_sock *newinet = inet_sk(newsk);
        struct ip_options_rcu *opt;
-       struct net *net = sock_net(sk);
        struct flowi4 *fl4;
        struct rtable *rt;
 
@@ -443,11 +445,12 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 
        rcu_read_lock();
        opt = rcu_dereference(newinet->inet_opt);
-       flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
+       flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
-                          ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
+                          ireq->ir_loc_addr, ireq->ir_rmt_port,
+                          htons(ireq->ir_num));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
@@ -475,33 +478,37 @@ static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
 #if IS_ENABLED(CONFIG_IPV6)
 #define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
-#define AF_INET_FAMILY(fam) 1
+#define AF_INET_FAMILY(fam) true
 #endif
 
-struct request_sock *inet_csk_search_req(const struct sock *sk,
-                                        struct request_sock ***prevp,
-                                        const __be16 rport, const __be32 raddr,
+/* Note: this is temporary :
+ * req sock will no longer be in listener hash table
+*/
+struct request_sock *inet_csk_search_req(struct sock *sk,
+                                        const __be16 rport,
+                                        const __be32 raddr,
                                         const __be32 laddr)
 {
-       const struct inet_connection_sock *icsk = inet_csk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-       struct request_sock *req, **prev;
+       struct request_sock *req;
+       u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
+                                 lopt->nr_table_entries);
 
-       for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
-                                                   lopt->nr_table_entries)];
-            (req = *prev) != NULL;
-            prev = &req->dl_next) {
+       spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+       for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);
 
                if (ireq->ir_rmt_port == rport &&
                    ireq->ir_rmt_addr == raddr &&
                    ireq->ir_loc_addr == laddr &&
                    AF_INET_FAMILY(req->rsk_ops->family)) {
+                       atomic_inc(&req->rsk_refcnt);
                        WARN_ON(req->sk);
-                       *prevp = prev;
                        break;
                }
        }
+       spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
        return req;
 }
@@ -557,23 +564,24 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
 }
 EXPORT_SYMBOL(inet_rtx_syn_ack);
 
-void inet_csk_reqsk_queue_prune(struct sock *parent,
-                               const unsigned long interval,
-                               const unsigned long timeout,
-                               const unsigned long max_rto)
+static void reqsk_timer_handler(unsigned long data)
 {
-       struct inet_connection_sock *icsk = inet_csk(parent);
+       struct request_sock *req = (struct request_sock *)data;
+       struct sock *sk_listener = req->rsk_listener;
+       struct inet_connection_sock *icsk = inet_csk(sk_listener);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct listen_sock *lopt = queue->listen_opt;
-       int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
-       int thresh = max_retries;
-       unsigned long now = jiffies;
-       struct request_sock **reqp, *req;
-       int i, budget;
+       int qlen, expire = 0, resend = 0;
+       int max_retries, thresh;
+       u8 defer_accept;
 
-       if (lopt == NULL || lopt->qlen == 0)
+       if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
+               reqsk_put(req);
                return;
+       }
 
+       max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
+       thresh = max_retries;
        /* Normally all the openreqs are young and become mature
         * (i.e. converted to established socket) for first timeout.
         * If synack was not acknowledged for 1 second, it means
@@ -591,67 +599,65 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
         * embrions; and abort old ones without pity, if old
         * ones are about to clog our table.
         */
-       if (lopt->qlen>>(lopt->max_qlen_log-1)) {
-               int young = (lopt->qlen_young<<1);
+       qlen = listen_sock_qlen(lopt);
+       if (qlen >> (lopt->max_qlen_log - 1)) {
+               int young = listen_sock_young(lopt) << 1;
 
                while (thresh > 2) {
-                       if (lopt->qlen < young)
+                       if (qlen < young)
                                break;
                        thresh--;
                        young <<= 1;
                }
        }
+       defer_accept = READ_ONCE(queue->rskq_defer_accept);
+       if (defer_accept)
+               max_retries = defer_accept;
+       syn_ack_recalc(req, thresh, max_retries, defer_accept,
+                      &expire, &resend);
+       req->rsk_ops->syn_ack_timeout(req);
+       if (!expire &&
+           (!resend ||
+            !inet_rtx_syn_ack(sk_listener, req) ||
+            inet_rsk(req)->acked)) {
+               unsigned long timeo;
+
+               if (req->num_timeout++ == 0)
+                       atomic_inc(&lopt->young_dec);
+               timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
+               mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
+               return;
+       }
+       inet_csk_reqsk_queue_drop(sk_listener, req);
+       reqsk_put(req);
+}
 
-       if (queue->rskq_defer_accept)
-               max_retries = queue->rskq_defer_accept;
-
-       budget = 2 * (lopt->nr_table_entries / (timeout / interval));
-       i = lopt->clock_hand;
-
-       do {
-               reqp=&lopt->syn_table[i];
-               while ((req = *reqp) != NULL) {
-                       if (time_after_eq(now, req->expires)) {
-                               int expire = 0, resend = 0;
-
-                               syn_ack_recalc(req, thresh, max_retries,
-                                              queue->rskq_defer_accept,
-                                              &expire, &resend);
-                               req->rsk_ops->syn_ack_timeout(parent, req);
-                               if (!expire &&
-                                   (!resend ||
-                                    !inet_rtx_syn_ack(parent, req) ||
-                                    inet_rsk(req)->acked)) {
-                                       unsigned long timeo;
-
-                                       if (req->num_timeout++ == 0)
-                                               lopt->qlen_young--;
-                                       timeo = min(timeout << req->num_timeout,
-                                                   max_rto);
-                                       req->expires = now + timeo;
-                                       reqp = &req->dl_next;
-                                       continue;
-                               }
-
-                               /* Drop this request */
-                               inet_csk_reqsk_queue_unlink(parent, req, reqp);
-                               reqsk_queue_removed(queue, req);
-                               reqsk_free(req);
-                               continue;
-                       }
-                       reqp = &req->dl_next;
-               }
+void reqsk_queue_hash_req(struct request_sock_queue *queue,
+                         u32 hash, struct request_sock *req,
+                         unsigned long timeout)
+{
+       struct listen_sock *lopt = queue->listen_opt;
 
-               i = (i + 1) & (lopt->nr_table_entries - 1);
+       req->num_retrans = 0;
+       req->num_timeout = 0;
+       req->sk = NULL;
 
-       } while (--budget > 0);
+       /* before letting lookups find us, make sure all req fields
+        * are committed to memory and refcnt initialized.
+        */
+       smp_wmb();
+       atomic_set(&req->rsk_refcnt, 2);
+       setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
+       req->rsk_hash = hash;
 
-       lopt->clock_hand = i;
+       spin_lock(&queue->syn_wait_lock);
+       req->dl_next = lopt->syn_table[hash];
+       lopt->syn_table[hash] = req;
+       spin_unlock(&queue->syn_wait_lock);
 
-       if (lopt->qlen)
-               inet_csk_reset_keepalive_timer(parent, interval);
+       mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
 }
-EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
+EXPORT_SYMBOL(reqsk_queue_hash_req);
 
 /**
  *     inet_csk_clone_lock - clone an inet socket, and lock its clone
@@ -667,7 +673,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 {
        struct sock *newsk = sk_clone_lock(sk, priority);
 
-       if (newsk != NULL) {
+       if (newsk) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);
 
                newsk->sk_state = TCP_SYN_RECV;
@@ -679,6 +685,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
                newsk->sk_write_space = sk_stream_write_space;
 
                newsk->sk_mark = inet_rsk(req)->ir_mark;
+               atomic64_set(&newsk->sk_cookie,
+                            atomic64_read(&inet_rsk(req)->ir_cookie));
 
                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff     = 0;
@@ -785,8 +793,6 @@ void inet_csk_listen_stop(struct sock *sk)
        struct request_sock *acc_req;
        struct request_sock *req;
 
-       inet_csk_delete_keepalive_timer(sk);
-
        /* make all the listen_opt local to us */
        acc_req = reqsk_queue_yank_acceptq(queue);
 
@@ -816,9 +822,9 @@ void inet_csk_listen_stop(struct sock *sk)
 
                percpu_counter_inc(sk->sk_prot->orphan_count);
 
-               if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
+               if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
                        BUG_ON(tcp_sk(child)->fastopen_rsk != req);
-                       BUG_ON(sk != tcp_rsk(req)->listener);
+                       BUG_ON(sk != req->rsk_listener);
 
                        /* Paranoid, to prevent race condition if
                         * an inbound pkt destined for child is
@@ -827,7 +833,6 @@ void inet_csk_listen_stop(struct sock *sk)
                         * tcp_v4_destroy_sock().
                         */
                        tcp_sk(child)->fastopen_rsk = NULL;
-                       sock_put(sk);
                }
                inet_csk_destroy_sock(child);
 
@@ -836,9 +841,9 @@ void inet_csk_listen_stop(struct sock *sk)
                sock_put(child);
 
                sk_acceptq_removed(sk);
-               __reqsk_free(req);
+               reqsk_put(req);
        }
-       if (queue->fastopenq != NULL) {
+       if (queue->fastopenq) {
                /* Free all the reqs queued in rskq_rst_head. */
                spin_lock_bh(&queue->fastopenq->lock);
                acc_req = queue->fastopenq->rskq_rst_head;
@@ -846,7 +851,7 @@ void inet_csk_listen_stop(struct sock *sk)
                spin_unlock_bh(&queue->fastopenq->lock);
                while ((req = acc_req) != NULL) {
                        acc_req = req->dl_next;
-                       __reqsk_free(req);
+                       reqsk_put(req);
                }
        }
        WARN_ON(sk->sk_ack_backlog);
@@ -870,7 +875,7 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
-       if (icsk->icsk_af_ops->compat_getsockopt != NULL)
+       if (icsk->icsk_af_ops->compat_getsockopt)
                return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->getsockopt(sk, level, optname,
@@ -883,7 +888,7 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
-       if (icsk->icsk_af_ops->compat_setsockopt != NULL)
+       if (icsk->icsk_af_ops->compat_setsockopt)
                return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->setsockopt(sk, level, optname,
index 592aff37366bb932c0aeb2bad944b5470374ec21..76322c9867d5eb1ffe7808c908e42208046888a7 100644 (file)
 static const struct inet_diag_handler **inet_diag_table;
 
 struct inet_diag_entry {
-       __be32 *saddr;
-       __be32 *daddr;
+       const __be32 *saddr;
+       const __be32 *daddr;
        u16 sport;
        u16 dport;
        u16 family;
        u16 userlocks;
-#if IS_ENABLED(CONFIG_IPV6)
-       struct in6_addr saddr_storage;  /* for IPv4-mapped-IPv6 addresses */
-       struct in6_addr daddr_storage;  /* for IPv4-mapped-IPv6 addresses */
-#endif
 };
 
 static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -65,12 +61,35 @@ static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
        return inet_diag_table[proto];
 }
 
-static inline void inet_diag_unlock_handler(
-       const struct inet_diag_handler *handler)
+static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
 {
        mutex_unlock(&inet_diag_table_mutex);
 }
 
+static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
+{
+       r->idiag_family = sk->sk_family;
+
+       r->id.idiag_sport = htons(sk->sk_num);
+       r->id.idiag_dport = sk->sk_dport;
+       r->id.idiag_if = sk->sk_bound_dev_if;
+       sock_diag_save_cookie(sk, r->id.idiag_cookie);
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6) {
+               *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
+               *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
+       } else
+#endif
+       {
+       memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+       memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
+       r->id.idiag_src[0] = sk->sk_rcv_saddr;
+       r->id.idiag_dst[0] = sk->sk_daddr;
+       }
+}
+
 static size_t inet_sk_attr_size(void)
 {
        return    nla_total_size(sizeof(struct tcp_info))
@@ -86,21 +105,21 @@ static size_t inet_sk_attr_size(void)
 }
 
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
-                             struct sk_buff *skb, struct inet_diag_req_v2 *req,
-                             struct user_namespace *user_ns,                   
-                             u32 portid, u32 seq, u16 nlmsg_flags,
-                             const struct nlmsghdr *unlh)
+                     struct sk_buff *skb, const struct inet_diag_req_v2 *req,
+                     struct user_namespace *user_ns,
+                     u32 portid, u32 seq, u16 nlmsg_flags,
+                     const struct nlmsghdr *unlh)
 {
        const struct inet_sock *inet = inet_sk(sk);
+       const struct inet_diag_handler *handler;
+       int ext = req->idiag_ext;
        struct inet_diag_msg *r;
        struct nlmsghdr  *nlh;
        struct nlattr *attr;
        void *info = NULL;
-       const struct inet_diag_handler *handler;
-       int ext = req->idiag_ext;
 
        handler = inet_diag_table[req->sdiag_protocol];
-       BUG_ON(handler == NULL);
+       BUG_ON(!handler);
 
        nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        nlmsg_flags);
@@ -108,25 +127,13 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                return -EMSGSIZE;
 
        r = nlmsg_data(nlh);
-       BUG_ON(sk->sk_state == TCP_TIME_WAIT);
+       BUG_ON(!sk_fullsock(sk));
 
-       r->idiag_family = sk->sk_family;
+       inet_diag_msg_common_fill(r, sk);
        r->idiag_state = sk->sk_state;
        r->idiag_timer = 0;
        r->idiag_retrans = 0;
 
-       r->id.idiag_if = sk->sk_bound_dev_if;
-       sock_diag_save_cookie(sk, r->id.idiag_cookie);
-
-       r->id.idiag_sport = inet->inet_sport;
-       r->id.idiag_dport = inet->inet_dport;
-
-       memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
-       memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
-
-       r->id.idiag_src[0] = inet->inet_rcv_saddr;
-       r->id.idiag_dst[0] = inet->inet_daddr;
-
        if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
                goto errout;
 
@@ -139,10 +146,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
-
-               *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
-               *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
-
                if (ext & (1 << (INET_DIAG_TCLASS - 1)))
                        if (nla_put_u8(skb, INET_DIAG_TCLASS,
                                       inet6_sk(sk)->tclass) < 0)
@@ -169,7 +172,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
                        goto errout;
 
-       if (icsk == NULL) {
+       if (!icsk) {
                handler->idiag_get_info(sk, r, NULL);
                goto out;
        }
@@ -227,23 +230,25 @@ errout:
 EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
 
 static int inet_csk_diag_fill(struct sock *sk,
-                             struct sk_buff *skb, struct inet_diag_req_v2 *req,
+                             struct sk_buff *skb,
+                             const struct inet_diag_req_v2 *req,
                              struct user_namespace *user_ns,
                              u32 portid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
 {
-       return inet_sk_diag_fill(sk, inet_csk(sk),
-                       skb, req, user_ns, portid, seq, nlmsg_flags, unlh);
+       return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
+                                user_ns, portid, seq, nlmsg_flags, unlh);
 }
 
-static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
-                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
+static int inet_twsk_diag_fill(struct sock *sk,
+                              struct sk_buff *skb,
                               u32 portid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
 {
-       s32 tmo;
+       struct inet_timewait_sock *tw = inet_twsk(sk);
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
+       s32 tmo;
 
        nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        nlmsg_flags);
@@ -257,21 +262,9 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        if (tmo < 0)
                tmo = 0;
 
-       r->idiag_family       = tw->tw_family;
+       inet_diag_msg_common_fill(r, sk);
        r->idiag_retrans      = 0;
 
-       r->id.idiag_if        = tw->tw_bound_dev_if;
-       sock_diag_save_cookie(tw, r->id.idiag_cookie);
-
-       r->id.idiag_sport     = tw->tw_sport;
-       r->id.idiag_dport     = tw->tw_dport;
-
-       memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
-       memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
-
-       r->id.idiag_src[0]    = tw->tw_rcv_saddr;
-       r->id.idiag_dst[0]    = tw->tw_daddr;
-
        r->idiag_state        = tw->tw_substate;
        r->idiag_timer        = 3;
        r->idiag_expires      = jiffies_to_msecs(tmo);
@@ -279,61 +272,91 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r->idiag_wqueue       = 0;
        r->idiag_uid          = 0;
        r->idiag_inode        = 0;
-#if IS_ENABLED(CONFIG_IPV6)
-       if (tw->tw_family == AF_INET6) {
-               *(struct in6_addr *)r->id.idiag_src = tw->tw_v6_rcv_saddr;
-               *(struct in6_addr *)r->id.idiag_dst = tw->tw_v6_daddr;
-       }
-#endif
+
+       nlmsg_end(skb, nlh);
+       return 0;
+}
+
+static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
+                             u32 portid, u32 seq, u16 nlmsg_flags,
+                             const struct nlmsghdr *unlh)
+{
+       struct inet_diag_msg *r;
+       struct nlmsghdr *nlh;
+       long tmo;
+
+       nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
+                       nlmsg_flags);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       r = nlmsg_data(nlh);
+       inet_diag_msg_common_fill(r, sk);
+       r->idiag_state = TCP_SYN_RECV;
+       r->idiag_timer = 1;
+       r->idiag_retrans = inet_reqsk(sk)->num_retrans;
+
+       BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
+                    offsetof(struct sock, sk_cookie));
+
+       tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
+       r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
+       r->idiag_rqueue = 0;
+       r->idiag_wqueue = 0;
+       r->idiag_uid    = 0;
+       r->idiag_inode  = 0;
 
        nlmsg_end(skb, nlh);
        return 0;
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
-                       struct inet_diag_req_v2 *r,
+                       const struct inet_diag_req_v2 *r,
                        struct user_namespace *user_ns,
                        u32 portid, u32 seq, u16 nlmsg_flags,
                        const struct nlmsghdr *unlh)
 {
        if (sk->sk_state == TCP_TIME_WAIT)
-               return inet_twsk_diag_fill(inet_twsk(sk), skb, r, portid, seq,
+               return inet_twsk_diag_fill(sk, skb, portid, seq,
                                           nlmsg_flags, unlh);
 
+       if (sk->sk_state == TCP_NEW_SYN_RECV)
+               return inet_req_diag_fill(sk, skb, portid, seq,
+                                         nlmsg_flags, unlh);
+
        return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
                                  nlmsg_flags, unlh);
 }
 
-int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
-               const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
+                           struct sk_buff *in_skb,
+                           const struct nlmsghdr *nlh,
+                           const struct inet_diag_req_v2 *req)
 {
-       int err;
-       struct sock *sk;
-       struct sk_buff *rep;
        struct net *net = sock_net(in_skb->sk);
+       struct sk_buff *rep;
+       struct sock *sk;
+       int err;
 
        err = -EINVAL;
-       if (req->sdiag_family == AF_INET) {
+       if (req->sdiag_family == AF_INET)
                sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
                                 req->id.idiag_dport, req->id.idiag_src[0],
                                 req->id.idiag_sport, req->id.idiag_if);
-       }
 #if IS_ENABLED(CONFIG_IPV6)
-       else if (req->sdiag_family == AF_INET6) {
+       else if (req->sdiag_family == AF_INET6)
                sk = inet6_lookup(net, hashinfo,
                                  (struct in6_addr *)req->id.idiag_dst,
                                  req->id.idiag_dport,
                                  (struct in6_addr *)req->id.idiag_src,
                                  req->id.idiag_sport,
                                  req->id.idiag_if);
-       }
 #endif
-       else {
+       else
                goto out_nosk;
-       }
 
        err = -ENOENT;
-       if (sk == NULL)
+       if (!sk)
                goto out_nosk;
 
        err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
@@ -371,7 +394,7 @@ EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
 
 static int inet_diag_get_exact(struct sk_buff *in_skb,
                               const struct nlmsghdr *nlh,
-                              struct inet_diag_req_v2 *req)
+                              const struct inet_diag_req_v2 *req)
 {
        const struct inet_diag_handler *handler;
        int err;
@@ -412,9 +435,8 @@ static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
        return 1;
 }
 
-
 static int inet_diag_bc_run(const struct nlattr *_bc,
-               const struct inet_diag_entry *entry)
+                           const struct inet_diag_entry *entry)
 {
        const void *bc = nla_data(_bc);
        int len = nla_len(_bc);
@@ -446,10 +468,10 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
                        break;
                case INET_DIAG_BC_S_COND:
                case INET_DIAG_BC_D_COND: {
-                       struct inet_diag_hostcond *cond;
-                       __be32 *addr;
+                       const struct inet_diag_hostcond *cond;
+                       const __be32 *addr;
 
-                       cond = (struct inet_diag_hostcond *)(op + 1);
+                       cond = (const struct inet_diag_hostcond *)(op + 1);
                        if (cond->port != -1 &&
                            cond->port != (op->code == INET_DIAG_BC_S_COND ?
                                             entry->sport : entry->dport)) {
@@ -498,29 +520,36 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
        return len == 0;
 }
 
+/* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV)
+ */
+static void entry_fill_addrs(struct inet_diag_entry *entry,
+                            const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6) {
+               entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
+               entry->daddr = sk->sk_v6_daddr.s6_addr32;
+       } else
+#endif
+       {
+               entry->saddr = &sk->sk_rcv_saddr;
+               entry->daddr = &sk->sk_daddr;
+       }
+}
+
 int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
 {
-       struct inet_diag_entry entry;
        struct inet_sock *inet = inet_sk(sk);
+       struct inet_diag_entry entry;
 
-       if (bc == NULL)
+       if (!bc)
                return 1;
 
        entry.family = sk->sk_family;
-#if IS_ENABLED(CONFIG_IPV6)
-       if (entry.family == AF_INET6) {
-
-               entry.saddr = sk->sk_v6_rcv_saddr.s6_addr32;
-               entry.daddr = sk->sk_v6_daddr.s6_addr32;
-       } else
-#endif
-       {
-               entry.saddr = &inet->inet_rcv_saddr;
-               entry.daddr = &inet->inet_daddr;
-       }
+       entry_fill_addrs(&entry, sk);
        entry.sport = inet->inet_num;
        entry.dport = ntohs(inet->inet_dport);
-       entry.userlocks = sk->sk_userlocks;
+       entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
 
        return inet_diag_bc_run(bc, &entry);
 }
@@ -547,8 +576,8 @@ static int valid_cc(const void *bc, int len, int cc)
 static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
                           int *min_len)
 {
-       int addr_len;
        struct inet_diag_hostcond *cond;
+       int addr_len;
 
        /* Check hostcond space. */
        *min_len += sizeof(struct inet_diag_hostcond);
@@ -582,8 +611,8 @@ static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
 }
 
 /* Validate a port comparison operator. */
-static inline bool valid_port_comparison(const struct inet_diag_bc_op *op,
-                                        int len, int *min_len)
+static bool valid_port_comparison(const struct inet_diag_bc_op *op,
+                                 int len, int *min_len)
 {
        /* Port comparisons put the port in a follow-on inet_diag_bc_op. */
        *min_len += sizeof(struct inet_diag_bc_op);
@@ -598,10 +627,9 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
        int  len = bytecode_len;
 
        while (len > 0) {
-               const struct inet_diag_bc_op *op = bc;
                int min_len = sizeof(struct inet_diag_bc_op);
+               const struct inet_diag_bc_op *op = bc;
 
-//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
                switch (op->code) {
                case INET_DIAG_BC_S_COND:
                case INET_DIAG_BC_D_COND:
@@ -642,7 +670,7 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 static int inet_csk_diag_dump(struct sock *sk,
                              struct sk_buff *skb,
                              struct netlink_callback *cb,
-                             struct inet_diag_req_v2 *r,
+                             const struct inet_diag_req_v2 *r,
                              const struct nlattr *bc)
 {
        if (!inet_diag_bc_sk(bc, sk))
@@ -654,139 +682,42 @@ static int inet_csk_diag_dump(struct sock *sk,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
-static int inet_twsk_diag_dump(struct sock *sk,
-                              struct sk_buff *skb,
-                              struct netlink_callback *cb,
-                              struct inet_diag_req_v2 *r,
-                              const struct nlattr *bc)
+static void twsk_build_assert(void)
 {
-       struct inet_timewait_sock *tw = inet_twsk(sk);
-
-       if (bc != NULL) {
-               struct inet_diag_entry entry;
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
+                    offsetof(struct sock, sk_family));
 
-               entry.family = tw->tw_family;
-#if IS_ENABLED(CONFIG_IPV6)
-               if (tw->tw_family == AF_INET6) {
-                       entry.saddr = tw->tw_v6_rcv_saddr.s6_addr32;
-                       entry.daddr = tw->tw_v6_daddr.s6_addr32;
-               } else
-#endif
-               {
-                       entry.saddr = &tw->tw_rcv_saddr;
-                       entry.daddr = &tw->tw_daddr;
-               }
-               entry.sport = tw->tw_num;
-               entry.dport = ntohs(tw->tw_dport);
-               entry.userlocks = 0;
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
+                    offsetof(struct inet_sock, inet_num));
 
-               if (!inet_diag_bc_run(bc, &entry))
-                       return 0;
-       }
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
+                    offsetof(struct inet_sock, inet_dport));
 
-       return inet_twsk_diag_fill(tw, skb, r,
-                                  NETLINK_CB(cb->skb).portid,
-                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
-}
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
+                    offsetof(struct inet_sock, inet_rcv_saddr));
 
-/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
- * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
- */
-static inline void inet_diag_req_addrs(const struct sock *sk,
-                                      const struct request_sock *req,
-                                      struct inet_diag_entry *entry)
-{
-       struct inet_request_sock *ireq = inet_rsk(req);
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
+                    offsetof(struct inet_sock, inet_daddr));
 
 #if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family == AF_INET6) {
-               if (req->rsk_ops->family == AF_INET6) {
-                       entry->saddr = ireq->ir_v6_loc_addr.s6_addr32;
-                       entry->daddr = ireq->ir_v6_rmt_addr.s6_addr32;
-               } else if (req->rsk_ops->family == AF_INET) {
-                       ipv6_addr_set_v4mapped(ireq->ir_loc_addr,
-                                              &entry->saddr_storage);
-                       ipv6_addr_set_v4mapped(ireq->ir_rmt_addr,
-                                              &entry->daddr_storage);
-                       entry->saddr = entry->saddr_storage.s6_addr32;
-                       entry->daddr = entry->daddr_storage.s6_addr32;
-               }
-       } else
-#endif
-       {
-               entry->saddr = &ireq->ir_loc_addr;
-               entry->daddr = &ireq->ir_rmt_addr;
-       }
-}
-
-static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
-                             struct request_sock *req,
-                             struct user_namespace *user_ns,
-                             u32 portid, u32 seq,
-                             const struct nlmsghdr *unlh)
-{
-       const struct inet_request_sock *ireq = inet_rsk(req);
-       struct inet_sock *inet = inet_sk(sk);
-       struct inet_diag_msg *r;
-       struct nlmsghdr *nlh;
-       long tmo;
-
-       nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
-                       NLM_F_MULTI);
-       if (!nlh)
-               return -EMSGSIZE;
-
-       r = nlmsg_data(nlh);
-       r->idiag_family = sk->sk_family;
-       r->idiag_state = TCP_SYN_RECV;
-       r->idiag_timer = 1;
-       r->idiag_retrans = req->num_retrans;
-
-       r->id.idiag_if = sk->sk_bound_dev_if;
-       sock_diag_save_cookie(req, r->id.idiag_cookie);
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
+                    offsetof(struct sock, sk_v6_rcv_saddr));
 
-       tmo = req->expires - jiffies;
-       if (tmo < 0)
-               tmo = 0;
-
-       r->id.idiag_sport = inet->inet_sport;
-       r->id.idiag_dport = ireq->ir_rmt_port;
-
-       memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
-       memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
-
-       r->id.idiag_src[0] = ireq->ir_loc_addr;
-       r->id.idiag_dst[0] = ireq->ir_rmt_addr;
-
-       r->idiag_expires = jiffies_to_msecs(tmo);
-       r->idiag_rqueue = 0;
-       r->idiag_wqueue = 0;
-       r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
-       r->idiag_inode = 0;
-#if IS_ENABLED(CONFIG_IPV6)
-       if (r->idiag_family == AF_INET6) {
-               struct inet_diag_entry entry;
-               inet_diag_req_addrs(sk, req, &entry);
-               memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
-               memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
-       }
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
+                    offsetof(struct sock, sk_v6_daddr));
 #endif
-
-       nlmsg_end(skb, nlh);
-       return 0;
 }
 
 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
                               struct netlink_callback *cb,
-                              struct inet_diag_req_v2 *r,
+                              const struct inet_diag_req_v2 *r,
                               const struct nlattr *bc)
 {
-       struct inet_diag_entry entry;
        struct inet_connection_sock *icsk = inet_csk(sk);
-       struct listen_sock *lopt;
        struct inet_sock *inet = inet_sk(sk);
-       int j, s_j;
-       int reqnum, s_reqnum;
+       struct inet_diag_entry entry;
+       int j, s_j, reqnum, s_reqnum;
+       struct listen_sock *lopt;
        int err = 0;
 
        s_j = cb->args[3];
@@ -797,13 +728,13 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
        entry.family = sk->sk_family;
 
-       read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
        lopt = icsk->icsk_accept_queue.listen_opt;
-       if (!lopt || !lopt->qlen)
+       if (!lopt || !listen_sock_qlen(lopt))
                goto out;
 
-       if (bc != NULL) {
+       if (bc) {
                entry.sport = inet->inet_num;
                entry.userlocks = sk->sk_userlocks;
        }
@@ -822,17 +753,18 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
                                continue;
 
                        if (bc) {
-                               inet_diag_req_addrs(sk, req, &entry);
+                               /* Note: entry.sport and entry.userlocks are already set */
+                               entry_fill_addrs(&entry, req_to_sk(req));
                                entry.dport = ntohs(ireq->ir_rmt_port);
 
                                if (!inet_diag_bc_run(bc, &entry))
                                        continue;
                        }
 
-                       err = inet_diag_fill_req(skb, sk, req,
-                                              sk_user_ns(NETLINK_CB(cb->skb).sk),
-                                              NETLINK_CB(cb->skb).portid,
-                                              cb->nlh->nlmsg_seq, cb->nlh);
+                       err = inet_req_diag_fill(req_to_sk(req), skb,
+                                                NETLINK_CB(cb->skb).portid,
+                                                cb->nlh->nlmsg_seq,
+                                                NLM_F_MULTI, cb->nlh);
                        if (err < 0) {
                                cb->args[3] = j + 1;
                                cb->args[4] = reqnum;
@@ -844,17 +776,17 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
        }
 
 out:
-       read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
        return err;
 }
 
 void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
-               struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc)
+                        struct netlink_callback *cb,
+                        const struct inet_diag_req_v2 *r, struct nlattr *bc)
 {
-       int i, num;
-       int s_i, s_num;
        struct net *net = sock_net(skb->sk);
+       int i, num, s_i, s_num;
 
        s_i = cb->args[1];
        s_num = num = cb->args[2];
@@ -864,9 +796,9 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
                        goto skip_listen_ht;
 
                for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
-                       struct sock *sk;
-                       struct hlist_nulls_node *node;
                        struct inet_listen_hashbucket *ilb;
+                       struct hlist_nulls_node *node;
+                       struct sock *sk;
 
                        num = 0;
                        ilb = &hashinfo->listening_hash[i];
@@ -883,7 +815,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
                                }
 
                                if (r->sdiag_family != AF_UNSPEC &&
-                                               sk->sk_family != r->sdiag_family)
+                                   sk->sk_family != r->sdiag_family)
                                        goto next_listen;
 
                                if (r->id.idiag_sport != inet->inet_sport &&
@@ -931,8 +863,8 @@ skip_listen_ht:
        for (i = s_i; i <= hashinfo->ehash_mask; i++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[i];
                spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
-               struct sock *sk;
                struct hlist_nulls_node *node;
+               struct sock *sk;
 
                num = 0;
 
@@ -944,8 +876,7 @@ skip_listen_ht:
 
                spin_lock_bh(lock);
                sk_nulls_for_each(sk, node, &head->chain) {
-                       int res;
-                       int state;
+                       int state, res;
 
                        if (!net_eq(sock_net(sk), net))
                                continue;
@@ -964,10 +895,16 @@ skip_listen_ht:
                        if (r->id.idiag_dport != sk->sk_dport &&
                            r->id.idiag_dport)
                                goto next_normal;
-                       if (sk->sk_state == TCP_TIME_WAIT)
-                               res = inet_twsk_diag_dump(sk, skb, cb, r, bc);
-                       else
-                               res = inet_csk_diag_dump(sk, skb, cb, r, bc);
+                       twsk_build_assert();
+
+                       if (!inet_diag_bc_sk(bc, sk))
+                               goto next_normal;
+
+                       res = sk_diag_fill(sk, skb, r,
+                                          sk_user_ns(NETLINK_CB(cb->skb).sk),
+                                          NETLINK_CB(cb->skb).portid,
+                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                          cb->nlh);
                        if (res < 0) {
                                spin_unlock_bh(lock);
                                goto done;
@@ -988,7 +925,8 @@ out:
 EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
 
 static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
-               struct inet_diag_req_v2 *r, struct nlattr *bc)
+                           const struct inet_diag_req_v2 *r,
+                           struct nlattr *bc)
 {
        const struct inet_diag_handler *handler;
        int err = 0;
@@ -1005,8 +943,8 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 
 static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       struct nlattr *bc = NULL;
        int hdrlen = sizeof(struct inet_diag_req_v2);
+       struct nlattr *bc = NULL;
 
        if (nlmsg_attrlen(cb->nlh, hdrlen))
                bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
@@ -1014,7 +952,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
        return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
 }
 
-static inline int inet_diag_type2proto(int type)
+static int inet_diag_type2proto(int type)
 {
        switch (type) {
        case TCPDIAG_GETSOCK:
@@ -1026,12 +964,13 @@ static inline int inet_diag_type2proto(int type)
        }
 }
 
-static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
+static int inet_diag_dump_compat(struct sk_buff *skb,
+                                struct netlink_callback *cb)
 {
        struct inet_diag_req *rc = nlmsg_data(cb->nlh);
+       int hdrlen = sizeof(struct inet_diag_req);
        struct inet_diag_req_v2 req;
        struct nlattr *bc = NULL;
-       int hdrlen = sizeof(struct inet_diag_req);
 
        req.sdiag_family = AF_UNSPEC; /* compatibility */
        req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
@@ -1046,7 +985,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *c
 }
 
 static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
-                              const struct nlmsghdr *nlh)
+                                     const struct nlmsghdr *nlh)
 {
        struct inet_diag_req *rc = nlmsg_data(nlh);
        struct inet_diag_req_v2 req;
@@ -1075,7 +1014,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
 
                        attr = nlmsg_find_attr(nlh, hdrlen,
                                               INET_DIAG_REQ_BYTECODE);
-                       if (attr == NULL ||
+                       if (!attr ||
                            nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
                            inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
                                return -EINVAL;
@@ -1102,9 +1041,10 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
        if (h->nlmsg_flags & NLM_F_DUMP) {
                if (nlmsg_attrlen(h, hdrlen)) {
                        struct nlattr *attr;
+
                        attr = nlmsg_find_attr(h, hdrlen,
                                               INET_DIAG_REQ_BYTECODE);
-                       if (attr == NULL ||
+                       if (!attr ||
                            nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
                            inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
                                return -EINVAL;
@@ -1140,7 +1080,7 @@ int inet_diag_register(const struct inet_diag_handler *h)
 
        mutex_lock(&inet_diag_table_mutex);
        err = -EEXIST;
-       if (inet_diag_table[type] == NULL) {
+       if (!inet_diag_table[type]) {
                inet_diag_table[type] = h;
                err = 0;
        }
index e7920352646aed0a0680557babecfb586283ce92..5e346a082e5ff05b58cfebb64917ee26001d809d 100644 (file)
@@ -385,7 +385,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
        }
 
        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
-       if (q == NULL)
+       if (!q)
                return NULL;
 
        q->net = nf;
@@ -406,7 +406,7 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
        struct inet_frag_queue *q;
 
        q = inet_frag_alloc(nf, f, arg);
-       if (q == NULL)
+       if (!q)
                return NULL;
 
        return inet_frag_intern(nf, q, f, arg);
index 9111a4e221557173df0ce08e95632ee059d00b61..d4630bf2d9aad1fd9070a11323b1cd0f7c0b9949 100644 (file)
@@ -24,9 +24,9 @@
 #include <net/secure_seq.h>
 #include <net/ip.h>
 
-static unsigned int inet_ehashfn(struct net *net, const __be32 laddr,
-                                const __u16 lport, const __be32 faddr,
-                                const __be16 fport)
+static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
+                       const __u16 lport, const __be32 faddr,
+                       const __be16 fport)
 {
        static u32 inet_ehash_secret __read_mostly;
 
@@ -36,17 +36,21 @@ static unsigned int inet_ehashfn(struct net *net, const __be32 laddr,
                              inet_ehash_secret + net_hash_mix(net));
 }
 
-
-static unsigned int inet_sk_ehashfn(const struct sock *sk)
+/* This function handles inet_sock, but also timewait and request sockets
+ * for IPv4/IPv6.
+ */
+u32 sk_ehashfn(const struct sock *sk)
 {
-       const struct inet_sock *inet = inet_sk(sk);
-       const __be32 laddr = inet->inet_rcv_saddr;
-       const __u16 lport = inet->inet_num;
-       const __be32 faddr = inet->inet_daddr;
-       const __be16 fport = inet->inet_dport;
-       struct net *net = sock_net(sk);
-
-       return inet_ehashfn(net, laddr, lport, faddr, fport);
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6 &&
+           !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+               return inet6_ehashfn(sock_net(sk),
+                                    &sk->sk_v6_rcv_saddr, sk->sk_num,
+                                    &sk->sk_v6_daddr, sk->sk_dport);
+#endif
+       return inet_ehashfn(sock_net(sk),
+                           sk->sk_rcv_saddr, sk->sk_num,
+                           sk->sk_daddr, sk->sk_dport);
 }
 
 /*
@@ -60,8 +64,8 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
 {
        struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
-       if (tb != NULL) {
-               write_pnet(&tb->ib_net, hold_net(net));
+       if (tb) {
+               write_pnet(&tb->ib_net, net);
                tb->port      = snum;
                tb->fastreuse = 0;
                tb->fastreuseport = 0;
@@ -79,7 +83,6 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
 {
        if (hlist_empty(&tb->owners)) {
                __hlist_del(&tb->node);
-               release_net(ib_net(tb));
                kmem_cache_free(cachep, tb);
        }
 }
@@ -263,11 +266,19 @@ void sock_gen_put(struct sock *sk)
 
        if (sk->sk_state == TCP_TIME_WAIT)
                inet_twsk_free(inet_twsk(sk));
+       else if (sk->sk_state == TCP_NEW_SYN_RECV)
+               reqsk_free(inet_reqsk(sk));
        else
                sk_free(sk);
 }
 EXPORT_SYMBOL_GPL(sock_gen_put);
 
+void sock_edemux(struct sk_buff *skb)
+{
+       sock_gen_put(skb->sk);
+}
+EXPORT_SYMBOL(sock_edemux);
+
 struct sock *__inet_lookup_established(struct net *net,
                                  struct inet_hashinfo *hashinfo,
                                  const __be32 saddr, const __be16 sport,
@@ -400,13 +411,13 @@ int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct hlist_nulls_head *list;
-       spinlock_t *lock;
        struct inet_ehash_bucket *head;
+       spinlock_t *lock;
        int twrefcnt = 0;
 
        WARN_ON(!sk_unhashed(sk));
 
-       sk->sk_hash = inet_sk_ehashfn(sk);
+       sk->sk_hash = sk_ehashfn(sk);
        head = inet_ehash_bucket(hashinfo, sk->sk_hash);
        list = &head->chain;
        lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
@@ -423,15 +434,13 @@ int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
 
-static void __inet_hash(struct sock *sk)
+int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_listen_hashbucket *ilb;
 
-       if (sk->sk_state != TCP_LISTEN) {
-               __inet_hash_nolisten(sk, NULL);
-               return;
-       }
+       if (sk->sk_state != TCP_LISTEN)
+               return __inet_hash_nolisten(sk, tw);
 
        WARN_ON(!sk_unhashed(sk));
        ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -440,13 +449,15 @@ static void __inet_hash(struct sock *sk)
        __sk_nulls_add_node_rcu(sk, &ilb->head);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        spin_unlock(&ilb->lock);
+       return 0;
 }
+EXPORT_SYMBOL(__inet_hash);
 
 void inet_hash(struct sock *sk)
 {
        if (sk->sk_state != TCP_CLOSE) {
                local_bh_disable();
-               __inet_hash(sk);
+               __inet_hash(sk, NULL);
                local_bh_enable();
        }
 }
@@ -477,8 +488,7 @@ EXPORT_SYMBOL_GPL(inet_unhash);
 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                struct sock *sk, u32 port_offset,
                int (*check_established)(struct inet_timewait_death_row *,
-                       struct sock *, __u16, struct inet_timewait_sock **),
-               int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
+                       struct sock *, __u16, struct inet_timewait_sock **))
 {
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        const unsigned short snum = inet_sk(sk)->inet_num;
@@ -548,7 +558,7 @@ ok:
                inet_bind_hash(sk, tb, port);
                if (sk_unhashed(sk)) {
                        inet_sk(sk)->inet_sport = htons(port);
-                       twrefcnt += hash(sk, tw);
+                       twrefcnt += __inet_hash_nolisten(sk, tw);
                }
                if (tw)
                        twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
@@ -570,7 +580,7 @@ ok:
        tb  = inet_csk(sk)->icsk_bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-               hash(sk, NULL);
+               __inet_hash_nolisten(sk, NULL);
                spin_unlock_bh(&head->lock);
                return 0;
        } else {
@@ -590,7 +600,7 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row,
                      struct sock *sk)
 {
        return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
-                       __inet_check_established, __inet_hash_nolisten);
+                                  __inet_check_established);
 }
 EXPORT_SYMBOL_GPL(inet_hash_connect);
 
index 6d592f8555fb8bf15506d828b53c6582386f7bec..118f0f195820fa98554bafa5e1ddbd0da7c002c7 100644 (file)
@@ -98,7 +98,6 @@ void inet_twsk_free(struct inet_timewait_sock *tw)
 #ifdef SOCK_REFCNT_DEBUG
        pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
 #endif
-       release_net(twsk_net(tw));
        kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
        module_put(owner);
 }
@@ -174,7 +173,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
        struct inet_timewait_sock *tw =
                kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                                 GFP_ATOMIC);
-       if (tw != NULL) {
+       if (tw) {
                const struct inet_sock *inet = inet_sk(sk);
 
                kmemcheck_annotate_bitfield(tw, flags);
@@ -195,7 +194,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
                tw->tw_ipv6only     = 0;
                tw->tw_transparent  = inet->transparent;
                tw->tw_prot         = sk->sk_prot_creator;
-               twsk_net_set(tw, hold_net(sock_net(sk)));
+               atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
+               twsk_net_set(tw, sock_net(sk));
                /*
                 * Because we use RCU lookups, we should not set tw_refcnt
                 * to a non null value before everything is setup for this
@@ -487,6 +487,7 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo,
        for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
 restart_rcu:
+               cond_resched();
                rcu_read_lock();
 restart:
                sk_nulls_for_each_rcu(sk, node, &head->chain) {
index 145a50c4d56630a5fc97283d85c3fa29e10ab476..cc1da6d9cb351de56c7f357faebe32cdbb6f7c27 100644 (file)
@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                goto err;
 
        err = -ENOMEM;
-       if (pskb_pull(skb, ihl) == NULL)
+       if (!pskb_pull(skb, ihl))
                goto err;
 
        err = pskb_trim_rcsum(skb, end - offset);
@@ -537,7 +537,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                qp->q.fragments = head;
        }
 
-       WARN_ON(head == NULL);
+       WARN_ON(!head);
        WARN_ON(FRAG_CB(head)->offset != 0);
 
        /* Allocate a new buffer for the datagram. */
@@ -559,7 +559,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                struct sk_buff *clone;
                int i, plen = 0;
 
-               if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
+               clone = alloc_skb(0, GFP_ATOMIC);
+               if (!clone)
                        goto out_nomem;
                clone->next = head->next;
                head->next = clone;
@@ -638,7 +639,8 @@ int ip_defrag(struct sk_buff *skb, u32 user)
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
        /* Lookup (or create) queue header */
-       if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
+       qp = ip_find(net, ip_hdr(skb), user);
+       if (qp) {
                int ret;
 
                spin_lock(&qp->q.lock);
@@ -754,7 +756,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
        table = ip4_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
-               if (table == NULL)
+               if (!table)
                        goto err_alloc;
 
                table[0].data = &net->ipv4.frags.high_thresh;
@@ -770,7 +772,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
        }
 
        hdr = register_net_sysctl(net, "net/ipv4", table);
-       if (hdr == NULL)
+       if (!hdr)
                goto err_reg;
 
        net->ipv4.frags_hdr = hdr;
index 6207275fc749fc52f00e63d6cae212148a791f1b..5fd706473c733402c9aad9c6c30466549de8c54d 100644 (file)
@@ -182,7 +182,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
        t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
                             iph->daddr, iph->saddr, tpi->key);
 
-       if (t == NULL)
+       if (!t)
                return PACKET_REJECT;
 
        if (t->parms.iph.daddr == 0 ||
@@ -423,7 +423,7 @@ static int ipgre_open(struct net_device *dev)
                        return -EADDRNOTAVAIL;
                dev = rt->dst.dev;
                ip_rt_put(rt);
-               if (__in_dev_get_rtnl(dev) == NULL)
+               if (!__in_dev_get_rtnl(dev))
                        return -EADDRNOTAVAIL;
                t->mlink = dev->ifindex;
                ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
@@ -456,6 +456,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
        .ndo_do_ioctl           = ipgre_tunnel_ioctl,
        .ndo_change_mtu         = ip_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
+       .ndo_get_iflink         = ip_tunnel_get_iflink,
 };
 
 #define GRE_FEATURES (NETIF_F_SG |             \
@@ -621,10 +622,10 @@ static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
                parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
 
        if (data[IFLA_GRE_LOCAL])
-               parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);
+               parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
 
        if (data[IFLA_GRE_REMOTE])
-               parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);
+               parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
 
        if (data[IFLA_GRE_TTL])
                parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
@@ -686,6 +687,7 @@ static const struct net_device_ops gre_tap_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = ip_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
+       .ndo_get_iflink         = ip_tunnel_get_iflink,
 };
 
 static void ipgre_tap_setup(struct net_device *dev)
@@ -776,8 +778,8 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
            nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
-           nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
-           nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
+           nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
+           nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
            nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
            nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
            nla_put_u8(skb, IFLA_GRE_PMTUDISC,
index 3d4da2c16b6a3c6d6bc41d8fec0ed182037b0801..2e0410ed8f16f0d41189a6846633a7598a04b504 100644 (file)
@@ -203,7 +203,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
                raw = raw_local_deliver(skb, protocol);
 
                ipprot = rcu_dereference(inet_protos[protocol]);
-               if (ipprot != NULL) {
+               if (ipprot) {
                        int ret;
 
                        if (!ipprot->no_policy) {
@@ -314,7 +314,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
 
-       if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
+       if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
                const struct net_protocol *ipprot;
                int protocol = iph->protocol;
 
@@ -387,7 +387,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 
        IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);
 
-       if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb) {
                IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
                goto out;
        }
index 5b3d91be2db0c8f1a78606727c703475dd61b598..bd246792360b4b8dcda2c13328ea5f01bb603e06 100644 (file)
@@ -264,7 +264,7 @@ int ip_options_compile(struct net *net,
        unsigned char *iph;
        int optlen, l;
 
-       if (skb != NULL) {
+       if (skb) {
                rt = skb_rtable(skb);
                optptr = (unsigned char *)&(ip_hdr(skb)[1]);
        } else
index a7aea2048a0d7a624ceb79923d25e9750ec6fa9a..26f6f7956168a795e1c465a65e89939c5b7431d6 100644 (file)
@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
        iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
        iph->saddr    = saddr;
        iph->protocol = sk->sk_protocol;
-       ip_select_ident(skb, sk);
+       ip_select_ident(sock_net(sk), skb, sk);
 
        if (opt && opt->opt.optlen) {
                iph->ihl += opt->opt.optlen>>2;
@@ -182,7 +182,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
                struct sk_buff *skb2;
 
                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
-               if (skb2 == NULL) {
+               if (!skb2) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
@@ -257,7 +257,7 @@ static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
-       if (skb_dst(skb)->xfrm != NULL) {
+       if (skb_dst(skb)->xfrm) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
@@ -376,12 +376,12 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
        inet_opt = rcu_dereference(inet->inet_opt);
        fl4 = &fl->u.ip4;
        rt = skb_rtable(skb);
-       if (rt != NULL)
+       if (rt)
                goto packet_routed;
 
        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
-       if (rt == NULL) {
+       if (!rt) {
                __be32 daddr;
 
                /* Use correct destination address if we have options. */
@@ -430,7 +430,8 @@ packet_routed:
                ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
        }
 
-       ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
+       ip_select_ident_segs(sock_net(sk), skb, sk,
+                            skb_shinfo(skb)->gso_segs ?: 1);
 
        /* TODO : should we use skb->sk here instead of sk ? */
        skb->priority = sk->sk_priority;
@@ -586,7 +587,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
-                               if (frag->next != NULL)
+                               if (frag->next)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
@@ -636,10 +637,7 @@ slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;             /* Where to start from */
 
-       /* for bridged IP traffic encapsulated inside f.e. a vlan header,
-        * we need to make room for the encapsulating header
-        */
-       ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
+       ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
 
        /*
         *      Fragment the datagram.
@@ -792,12 +790,13 @@ static inline int ip_ufo_append_data(struct sock *sk,
         * device, so create one single skb packet containing complete
         * udp datagram
         */
-       if ((skb = skb_peek_tail(queue)) == NULL) {
+       skb = skb_peek_tail(queue);
+       if (!skb) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
 
-               if (skb == NULL)
+               if (!skb)
                        return err;
 
                /* reserve space for Hardware header */
@@ -963,10 +962,10 @@ alloc_new_skb:
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
-                               if (unlikely(skb == NULL))
+                               if (unlikely(!skb))
                                        err = -ENOBUFS;
                        }
-                       if (skb == NULL)
+                       if (!skb)
                                goto error;
 
                        /*
@@ -1090,10 +1089,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
         */
        opt = ipc->opt;
        if (opt) {
-               if (cork->opt == NULL) {
+               if (!cork->opt) {
                        cork->opt = kmalloc(sizeof(struct ip_options) + 40,
                                            sk->sk_allocation);
-                       if (unlikely(cork->opt == NULL))
+                       if (unlikely(!cork->opt))
                                return -ENOBUFS;
                }
                memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
@@ -1200,7 +1199,8 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                return -EMSGSIZE;
        }
 
-       if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+       skb = skb_peek_tail(&sk->sk_write_queue);
+       if (!skb)
                return -EINVAL;
 
        cork->length += size;
@@ -1331,7 +1331,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        __be16 df = 0;
        __u8 ttl;
 
-       if ((skb = __skb_dequeue(queue)) == NULL)
+       skb = __skb_dequeue(queue);
+       if (!skb)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);
 
@@ -1382,7 +1383,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
        ip_copy_addrs(iph, fl4);
-       ip_select_ident(skb, sk);
+       ip_select_ident(net, skb, sk);
 
        if (opt) {
                iph->ihl += opt->optlen>>2;
index 5cd99271d3a6a07c17a915fddde7a5a0c8a86618..7cfb0893f2636bcc87537da3014643362f72b10f 100644 (file)
@@ -351,7 +351,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
                        return 0;
                }
        }
-       if (new_ra == NULL) {
+       if (!new_ra) {
                spin_unlock_bh(&ip_ra_lock);
                return -ENOBUFS;
        }
@@ -387,7 +387,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
                                   skb_network_header(skb);
        serr->port = port;
 
-       if (skb_pull(skb, payload - skb->data) != NULL) {
+       if (skb_pull(skb, payload - skb->data)) {
                skb_reset_transport_header(skb);
                if (sock_queue_err_skb(sk, skb) == 0)
                        return;
@@ -482,7 +482,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
        err = -EAGAIN;
        skb = sock_dequeue_err_skb(sk);
-       if (skb == NULL)
+       if (!skb)
                goto out;
 
        copied = skb->len;
@@ -536,12 +536,34 @@ out:
  *     Socket option code for IP. This is the end of the line after any
  *     TCP,UDP etc options on an IP socket.
  */
+static bool setsockopt_needs_rtnl(int optname)
+{
+       switch (optname) {
+       case IP_ADD_MEMBERSHIP:
+       case IP_ADD_SOURCE_MEMBERSHIP:
+       case IP_BLOCK_SOURCE:
+       case IP_DROP_MEMBERSHIP:
+       case IP_DROP_SOURCE_MEMBERSHIP:
+       case IP_MSFILTER:
+       case IP_UNBLOCK_SOURCE:
+       case MCAST_BLOCK_SOURCE:
+       case MCAST_MSFILTER:
+       case MCAST_JOIN_GROUP:
+       case MCAST_JOIN_SOURCE_GROUP:
+       case MCAST_LEAVE_GROUP:
+       case MCAST_LEAVE_SOURCE_GROUP:
+       case MCAST_UNBLOCK_SOURCE:
+               return true;
+       }
+       return false;
+}
 
 static int do_ip_setsockopt(struct sock *sk, int level,
                            int optname, char __user *optval, unsigned int optlen)
 {
        struct inet_sock *inet = inet_sk(sk);
        int val = 0, err;
+       bool needs_rtnl = setsockopt_needs_rtnl(optname);
 
        switch (optname) {
        case IP_PKTINFO:
@@ -584,6 +606,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                return ip_mroute_setsockopt(sk, optname, optval, optlen);
 
        err = 0;
+       if (needs_rtnl)
+               rtnl_lock();
        lock_sock(sk);
 
        switch (optname) {
@@ -1118,10 +1142,14 @@ mc_msf_out:
                break;
        }
        release_sock(sk);
+       if (needs_rtnl)
+               rtnl_unlock();
        return err;
 
 e_inval:
        release_sock(sk);
+       if (needs_rtnl)
+               rtnl_unlock();
        return -EINVAL;
 }
 
index 2cd08280c77bc33cac90c62e0f6f8f36343a768d..6d364ab8e14eec5ff24c49357fbea45b02b92b39 100644 (file)
@@ -389,7 +389,6 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
                hlen = tdev->hard_header_len + tdev->needed_headroom;
                mtu = tdev->mtu;
        }
-       dev->iflink = tunnel->parms.link;
 
        dev->needed_headroom = t_hlen + hlen;
        mtu -= (dev->hard_header_len + t_hlen);
@@ -655,7 +654,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        if (dst == 0) {
                /* NBMA tunnel */
 
-               if (skb_dst(skb) == NULL) {
+               if (!skb_dst(skb)) {
                        dev->stats.tx_fifo_errors++;
                        goto tx_error;
                }
@@ -673,7 +672,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
                        neigh = dst_neigh_lookup(skb_dst(skb),
                                                 &ipv6_hdr(skb)->daddr);
-                       if (neigh == NULL)
+                       if (!neigh)
                                goto tx_error;
 
                        addr6 = (const struct in6_addr *)&neigh->primary_key;
@@ -844,7 +843,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
        case SIOCGETTUNNEL:
                if (dev == itn->fb_tunnel_dev) {
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
-                       if (t == NULL)
+                       if (!t)
                                t = netdev_priv(dev);
                }
                memcpy(p, &t->parms, sizeof(*p));
@@ -877,7 +876,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
                        break;
                }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
-                       if (t != NULL) {
+                       if (t) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
@@ -915,7 +914,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
                if (dev == itn->fb_tunnel_dev) {
                        err = -ENOENT;
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
-                       if (t == NULL)
+                       if (!t)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(itn->fb_tunnel_dev))
@@ -980,6 +979,14 @@ struct net *ip_tunnel_get_link_net(const struct net_device *dev)
 }
 EXPORT_SYMBOL(ip_tunnel_get_link_net);
 
+int ip_tunnel_get_iflink(const struct net_device *dev)
+{
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+
+       return tunnel->parms.link;
+}
+EXPORT_SYMBOL(ip_tunnel_get_iflink);
+
 int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                                  struct rtnl_link_ops *ops, char *devname)
 {
index 88c386cf7d85a985b9bd0aca11d3528347aea152..8c4dcc46acd2932e062faf546331c938b160b0d1 100644 (file)
@@ -74,7 +74,7 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
        iph->daddr      =       dst;
        iph->saddr      =       src;
        iph->ttl        =       ttl;
-       __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
+       __ip_select_ident(sock_net(sk), iph, skb_shinfo(skb)->gso_segs ?: 1);
 
        err = ip_local_out_sk(sk, skb);
        if (unlikely(net_xmit_eval(err)))
index 94efe148181cde3bef3b58ad8d6dd32deee8747f..9f7269f3c54af2ecbc74db4ec2c0f71d5184dc1c 100644 (file)
@@ -60,7 +60,7 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
 
        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                                  iph->saddr, iph->daddr, 0);
-       if (tunnel != NULL) {
+       if (tunnel) {
                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                        goto drop;
 
@@ -341,6 +341,7 @@ static const struct net_device_ops vti_netdev_ops = {
        .ndo_do_ioctl   = vti_tunnel_ioctl,
        .ndo_change_mtu = ip_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
+       .ndo_get_iflink = ip_tunnel_get_iflink,
 };
 
 static void vti_tunnel_setup(struct net_device *dev)
@@ -361,7 +362,6 @@ static int vti_tunnel_init(struct net_device *dev)
        dev->hard_header_len    = LL_MAX_HEADER + sizeof(struct iphdr);
        dev->mtu                = ETH_DATA_LEN;
        dev->flags              = IFF_NOARP;
-       dev->iflink             = 0;
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_LLTX;
        netif_keep_dst(dev);
@@ -456,10 +456,10 @@ static void vti_netlink_parms(struct nlattr *data[],
                parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
 
        if (data[IFLA_VTI_LOCAL])
-               parms->iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);
+               parms->iph.saddr = nla_get_in_addr(data[IFLA_VTI_LOCAL]);
 
        if (data[IFLA_VTI_REMOTE])
-               parms->iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
+               parms->iph.daddr = nla_get_in_addr(data[IFLA_VTI_REMOTE]);
 
 }
 
@@ -505,8 +505,8 @@ static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
        nla_put_u32(skb, IFLA_VTI_LINK, p->link);
        nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key);
        nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
-       nla_put_be32(skb, IFLA_VTI_LOCAL, p->iph.saddr);
-       nla_put_be32(skb, IFLA_VTI_REMOTE, p->iph.daddr);
+       nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr);
+       nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr);
 
        return 0;
 }
index c0855d50a3fa775831ada20254b74d94f5d410ae..d97f4f2787f5f85c5f073df5e5276daf03c4ba6a 100644 (file)
@@ -63,7 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
        struct xfrm_state *t;
 
        t = xfrm_state_alloc(net);
-       if (t == NULL)
+       if (!t)
                goto out;
 
        t->id.proto = IPPROTO_IPIP;
index b26376ef87f616d249dbfebbd6705eb818fefd05..8e7328c6a390a9bc064a67e4ca4263c891239378 100644 (file)
@@ -504,7 +504,8 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
        if (!net_eq(dev_net(dev), &init_net))
                goto drop;
 
-       if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
                return NET_RX_DROP;
 
        if (!pskb_may_pull(skb, sizeof(struct arphdr)))
@@ -958,7 +959,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto drop;
 
-       if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
                return NET_RX_DROP;
 
        if (!pskb_may_pull(skb,
index 915d215a7d145028ced69ea6e17f2c0ff40d1c7f..ff96396ebec5bdf794cf84776f21505d0b7de737 100644 (file)
@@ -144,7 +144,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
        err = -ENOENT;
        t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                             iph->daddr, iph->saddr, 0);
-       if (t == NULL)
+       if (!t)
                goto out;
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
@@ -272,6 +272,7 @@ static const struct net_device_ops ipip_netdev_ops = {
        .ndo_do_ioctl   = ipip_tunnel_ioctl,
        .ndo_change_mtu = ip_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
+       .ndo_get_iflink = ip_tunnel_get_iflink,
 };
 
 #define IPIP_FEATURES (NETIF_F_SG |            \
@@ -286,7 +287,6 @@ static void ipip_tunnel_setup(struct net_device *dev)
 
        dev->type               = ARPHRD_TUNNEL;
        dev->flags              = IFF_NOARP;
-       dev->iflink             = 0;
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_LLTX;
        netif_keep_dst(dev);
@@ -325,10 +325,10 @@ static void ipip_netlink_parms(struct nlattr *data[],
                parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
 
        if (data[IFLA_IPTUN_LOCAL])
-               parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]);
+               parms->iph.saddr = nla_get_in_addr(data[IFLA_IPTUN_LOCAL]);
 
        if (data[IFLA_IPTUN_REMOTE])
-               parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]);
+               parms->iph.daddr = nla_get_in_addr(data[IFLA_IPTUN_REMOTE]);
 
        if (data[IFLA_IPTUN_TTL]) {
                parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
@@ -450,8 +450,8 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct ip_tunnel_parm *parm = &tunnel->parms;
 
        if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
-           nla_put_be32(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
-           nla_put_be32(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
+           nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
+           nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
            nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
            nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
            nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
index 92825443fad6ea0f063fedfaa176761717fb739e..5f17d0e780716eafc4df6fc66b71106016ce8a9b 100644 (file)
@@ -73,9 +73,7 @@
 
 struct mr_table {
        struct list_head        list;
-#ifdef CONFIG_NET_NS
-       struct net              *net;
-#endif
+       possible_net_t          net;
        u32                     id;
        struct sock __rcu       *mroute_sk;
        struct timer_list       ipmr_expire_timer;
@@ -191,7 +189,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
        }
 
        mrt = ipmr_get_table(rule->fr_net, rule->table);
-       if (mrt == NULL)
+       if (!mrt)
                return -EAGAIN;
        res->mrt = mrt;
        return 0;
@@ -255,7 +253,7 @@ static int __net_init ipmr_rules_init(struct net *net)
        INIT_LIST_HEAD(&net->ipv4.mr_tables);
 
        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
-       if (mrt == NULL) {
+       if (!mrt) {
                err = -ENOMEM;
                goto err1;
        }
@@ -278,11 +276,13 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 {
        struct mr_table *mrt, *next;
 
+       rtnl_lock();
        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
                list_del(&mrt->list);
                ipmr_free_table(mrt);
        }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
+       rtnl_unlock();
 }
 #else
 #define ipmr_for_each_table(mrt, net) \
@@ -308,7 +308,10 @@ static int __net_init ipmr_rules_init(struct net *net)
 
 static void __net_exit ipmr_rules_exit(struct net *net)
 {
+       rtnl_lock();
        ipmr_free_table(net->ipv4.mrt);
+       net->ipv4.mrt = NULL;
+       rtnl_unlock();
 }
 #endif
 
@@ -318,11 +321,11 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
        unsigned int i;
 
        mrt = ipmr_get_table(net, id);
-       if (mrt != NULL)
+       if (mrt)
                return mrt;
 
        mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
-       if (mrt == NULL)
+       if (!mrt)
                return NULL;
        write_pnet(&mrt->net, net);
        mrt->id = id;
@@ -424,7 +427,7 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
                        dev->flags |= IFF_MULTICAST;
 
                        in_dev = __in_dev_get_rtnl(dev);
-                       if (in_dev == NULL)
+                       if (!in_dev)
                                goto failure;
 
                        ipv4_devconf_setall(in_dev);
@@ -475,8 +478,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
+static int reg_vif_get_iflink(const struct net_device *dev)
+{
+       return 0;
+}
+
 static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
+       .ndo_get_iflink = reg_vif_get_iflink,
 };
 
 static void reg_vif_setup(struct net_device *dev)
@@ -502,7 +511,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
 
        dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
 
-       if (dev == NULL)
+       if (!dev)
                return NULL;
 
        dev_net_set(dev, net);
@@ -511,7 +520,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
                free_netdev(dev);
                return NULL;
        }
-       dev->iflink = 0;
 
        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
@@ -759,7 +767,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
        case 0:
                if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
                        dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
-                       if (dev && __in_dev_get_rtnl(dev) == NULL) {
+                       if (dev && !__in_dev_get_rtnl(dev)) {
                                dev_put(dev);
                                return -EADDRNOTAVAIL;
                        }
@@ -803,7 +811,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
        v->pkt_out = 0;
        v->link = dev->ifindex;
        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
-               v->link = dev->iflink;
+               v->link = dev_get_iflink(dev);
 
        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
@@ -1005,7 +1013,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 
        rcu_read_lock();
        mroute_sk = rcu_dereference(mrt->mroute_sk);
-       if (mroute_sk == NULL) {
+       if (!mroute_sk) {
                rcu_read_unlock();
                kfree_skb(skb);
                return -EINVAL;
@@ -1158,7 +1166,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
                return -EINVAL;
 
        c = ipmr_cache_alloc();
-       if (c == NULL)
+       if (!c)
                return -ENOMEM;
 
        c->mfc_origin = mfc->mfcc_origin.s_addr;
@@ -1280,7 +1288,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
                return -EOPNOTSUPP;
 
        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        if (optname != MRT_INIT) {
@@ -1443,7 +1451,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
                return -EOPNOTSUPP;
 
        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        if (optname != MRT_VERSION &&
@@ -1489,7 +1497,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
        struct mr_table *mrt;
 
        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        switch (cmd) {
@@ -1563,7 +1571,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
        struct mr_table *mrt;
 
        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        switch (cmd) {
@@ -1644,7 +1652,8 @@ static struct notifier_block ip_mr_notifier = {
  *     important for multicast video.
  */
 
-static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
+static void ip_encap(struct net *net, struct sk_buff *skb,
+                    __be32 saddr, __be32 daddr)
 {
        struct iphdr *iph;
        const struct iphdr *old_iph = ip_hdr(skb);
@@ -1663,7 +1672,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
        iph->protocol   =       IPPROTO_IPIP;
        iph->ihl        =       5;
        iph->tot_len    =       htons(skb->len);
-       ip_select_ident(skb, NULL);
+       ip_select_ident(net, skb, NULL);
        ip_send_check(iph);
 
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -1697,7 +1706,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
        struct flowi4 fl4;
        int    encap = 0;
 
-       if (vif->dev == NULL)
+       if (!vif->dev)
                goto out_free;
 
 #ifdef CONFIG_IP_PIMSM
@@ -1760,7 +1769,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
         * What do we do with netfilter? -- RR
         */
        if (vif->flags & VIFF_TUNNEL) {
-               ip_encap(skb, vif->local, vif->remote);
+               ip_encap(net, skb, vif->local, vif->remote);
                /* FIXME: extra output firewall step used to be here. --RR */
                vif->dev->stats.tx_packets++;
                vif->dev->stats.tx_bytes += skb->len;
@@ -1988,7 +1997,7 @@ int ip_mr_input(struct sk_buff *skb)
 
        /* already under rcu_read_lock() */
        cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
-       if (cache == NULL) {
+       if (!cache) {
                int vif = ipmr_find_vif(mrt, skb->dev);
 
                if (vif >= 0)
@@ -1999,13 +2008,13 @@ int ip_mr_input(struct sk_buff *skb)
        /*
         *      No usable cache entry
         */
-       if (cache == NULL) {
+       if (!cache) {
                int vif;
 
                if (local) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                        ip_local_deliver(skb);
-                       if (skb2 == NULL)
+                       if (!skb2)
                                return -ENOBUFS;
                        skb = skb2;
                }
@@ -2064,7 +2073,7 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
                reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
        read_unlock(&mrt_lock);
 
-       if (reg_dev == NULL)
+       if (!reg_dev)
                return 1;
 
        skb->mac_header = skb->network_header;
@@ -2194,18 +2203,18 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
        int err;
 
        mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        rcu_read_lock();
        cache = ipmr_cache_find(mrt, saddr, daddr);
-       if (cache == NULL && skb->dev) {
+       if (!cache && skb->dev) {
                int vif = ipmr_find_vif(mrt, skb->dev);
 
                if (vif >= 0)
                        cache = ipmr_cache_find_any(mrt, daddr, vif);
        }
-       if (cache == NULL) {
+       if (!cache) {
                struct sk_buff *skb2;
                struct iphdr *iph;
                struct net_device *dev;
@@ -2263,7 +2272,7 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
        int err;
 
        nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        rtm = nlmsg_data(nlh);
@@ -2282,8 +2291,8 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                rtm->rtm_protocol = RTPROT_MROUTED;
        rtm->rtm_flags    = 0;
 
-       if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
-           nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
+       if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
+           nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
                goto nla_put_failure;
        err = __ipmr_fill_mroute(mrt, skb, c, rtm);
        /* do not break the dump if cache is unresolved */
@@ -2328,7 +2337,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
 
        skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
                        GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
@@ -2443,7 +2452,7 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
        struct mr_table *mrt;
 
        mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return ERR_PTR(-ENOENT);
 
        iter->mrt = mrt;
@@ -2562,7 +2571,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
        struct mr_table *mrt;
 
        mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
-       if (mrt == NULL)
+       if (!mrt)
                return ERR_PTR(-ENOENT);
 
        it->mrt = mrt;
index 7ebd6e37875cc95b08d294ff64306925d05e550e..65de0684e22a17862663096da407eee16bc33a31 100644 (file)
@@ -94,7 +94,7 @@ static void nf_ip_saveroute(const struct sk_buff *skb,
 {
        struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
-       if (entry->hook == NF_INET_LOCAL_OUT) {
+       if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);
 
                rt_info->tos = iph->tos;
@@ -109,7 +109,7 @@ static int nf_ip_reroute(struct sk_buff *skb,
 {
        const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
-       if (entry->hook == NF_INET_LOCAL_OUT) {
+       if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);
 
                if (!(iph->tos == rt_info->tos &&
index 59f883d9cadfcdfb69d775c506b0c911d31ff549..fb20f363151f138b6a1ca96a7a46ad5c68351cbb 100644 (file)
@@ -36,24 +36,16 @@ config NF_CONNTRACK_PROC_COMPAT
 
          If unsure, say Y.
 
-config NF_LOG_ARP
-       tristate "ARP packet logging"
-       default m if NETFILTER_ADVANCED=n
-       select NF_LOG_COMMON
-
-config NF_LOG_IPV4
-       tristate "IPv4 packet logging"
-       default m if NETFILTER_ADVANCED=n
-       select NF_LOG_COMMON
+if NF_TABLES
 
 config NF_TABLES_IPV4
-       depends on NF_TABLES
        tristate "IPv4 nf_tables support"
        help
          This option enables the IPv4 support for nf_tables.
 
+if NF_TABLES_IPV4
+
 config NFT_CHAIN_ROUTE_IPV4
-       depends on NF_TABLES_IPV4
        tristate "IPv4 nf_tables route chain support"
        help
          This option enables the "route" chain for IPv4 in nf_tables. This
@@ -61,22 +53,34 @@ config NFT_CHAIN_ROUTE_IPV4
          fields such as the source, destination, type of service and
          the packet mark.
 
-config NF_REJECT_IPV4
-       tristate "IPv4 packet rejection"
-       default m if NETFILTER_ADVANCED=n
-
 config NFT_REJECT_IPV4
-       depends on NF_TABLES_IPV4
        select NF_REJECT_IPV4
        default NFT_REJECT
        tristate
 
+endif # NF_TABLES_IPV4
+
 config NF_TABLES_ARP
-       depends on NF_TABLES
        tristate "ARP nf_tables support"
        help
          This option enables the ARP support for nf_tables.
 
+endif # NF_TABLES
+
+config NF_LOG_ARP
+       tristate "ARP packet logging"
+       default m if NETFILTER_ADVANCED=n
+       select NF_LOG_COMMON
+
+config NF_LOG_IPV4
+       tristate "IPv4 packet logging"
+       default m if NETFILTER_ADVANCED=n
+       select NF_LOG_COMMON
+
+config NF_REJECT_IPV4
+       tristate "IPv4 packet rejection"
+       default m if NETFILTER_ADVANCED=n
+
 config NF_NAT_IPV4
        tristate "IPv4 NAT"
        depends on NF_CONNTRACK_IPV4
index f95b6f93814b95b2c810eff8d4573a996f9a9f63..13bfe84bf3ca5a6aafe6982b8782958b0cce529f 100644 (file)
@@ -248,8 +248,7 @@ struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
 
 unsigned int arpt_do_table(struct sk_buff *skb,
                           unsigned int hook,
-                          const struct net_device *in,
-                          const struct net_device *out,
+                          const struct nf_hook_state *state,
                           struct xt_table *table)
 {
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -265,8 +264,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
        if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
                return NF_DROP;
 
-       indev = in ? in->name : nulldevname;
-       outdev = out ? out->name : nulldevname;
+       indev = state->in ? state->in->name : nulldevname;
+       outdev = state->out ? state->out->name : nulldevname;
 
        local_bh_disable();
        addend = xt_write_recseq_begin();
@@ -281,8 +280,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
        e = get_entry(table_base, private->hook_entry[hook]);
        back = get_entry(table_base, private->underflow[hook]);
 
-       acpar.in      = in;
-       acpar.out     = out;
+       acpar.in      = state->in;
+       acpar.out     = state->out;
        acpar.hooknum = hook;
        acpar.family  = NFPROTO_ARP;
        acpar.hotdrop = false;
index 802ddecb30b8110474da0e0a34c134aceaece43b..93876d03120ca85fbc1e5aaa689d245d4508f01e 100644 (file)
@@ -28,12 +28,11 @@ static const struct xt_table packet_filter = {
 /* The work comes in here from netfilter.c */
 static unsigned int
 arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net((in != NULL) ? in : out);
+       const struct net *net = dev_net(state->in ? state->in : state->out);
 
-       return arpt_do_table(skb, ops->hooknum, in, out,
+       return arpt_do_table(skb, ops->hooknum, state,
                             net->ipv4.arptable_filter);
 }
 
index cf5e82f39d3b87d7f8163320bffbc26af38d6f98..c69db7fa25ee6376ee3f2bee87d4ce7f09105fb3 100644 (file)
@@ -288,8 +288,7 @@ struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
 unsigned int
 ipt_do_table(struct sk_buff *skb,
             unsigned int hook,
-            const struct net_device *in,
-            const struct net_device *out,
+            const struct nf_hook_state *state,
             struct xt_table *table)
 {
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -306,8 +305,8 @@ ipt_do_table(struct sk_buff *skb,
 
        /* Initialization */
        ip = ip_hdr(skb);
-       indev = in ? in->name : nulldevname;
-       outdev = out ? out->name : nulldevname;
+       indev = state->in ? state->in->name : nulldevname;
+       outdev = state->out ? state->out->name : nulldevname;
        /* We handle fragments by dealing with the first fragment as
         * if it was a normal packet.  All other fragments are treated
         * normally, except that they will NEVER match rules that ask
@@ -317,8 +316,8 @@ ipt_do_table(struct sk_buff *skb,
        acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
        acpar.thoff   = ip_hdrlen(skb);
        acpar.hotdrop = false;
-       acpar.in      = in;
-       acpar.out     = out;
+       acpar.in      = state->in;
+       acpar.out     = state->out;
        acpar.family  = NFPROTO_IPV4;
        acpar.hooknum = hook;
 
@@ -370,7 +369,7 @@ ipt_do_table(struct sk_buff *skb,
 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
                /* The packet is traced: log it */
                if (unlikely(skb->nf_trace))
-                       trace_packet(skb, hook, in, out,
+                       trace_packet(skb, hook, state->in, state->out,
                                     table->name, private, e);
 #endif
                /* Standard target? */
index e90f83a3415b464014cecf0f072b2188f6889b7a..771ab3d01ad3dc303ac999e539a9c8ab5845baf2 100644 (file)
@@ -418,6 +418,13 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
        if (ret < 0)
                pr_info("cannot load conntrack support for proto=%u\n",
                        par->family);
+
+       if (!par->net->xt.clusterip_deprecated_warning) {
+               pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, "
+                       "use xt_cluster instead\n");
+               par->net->xt.clusterip_deprecated_warning = true;
+       }
+
        return ret;
 }
 
@@ -497,14 +504,12 @@ static void arp_print(struct arp_payload *payload)
 static unsigned int
 arp_mangle(const struct nf_hook_ops *ops,
           struct sk_buff *skb,
-          const struct net_device *in,
-          const struct net_device *out,
-          int (*okfn)(struct sk_buff *))
+          const struct nf_hook_state *state)
 {
        struct arphdr *arp = arp_hdr(skb);
        struct arp_payload *payload;
        struct clusterip_config *c;
-       struct net *net = dev_net(in ? in : out);
+       struct net *net = dev_net(state->in ? state->in : state->out);
 
        /* we don't care about non-ethernet and non-ipv4 ARP */
        if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
@@ -529,10 +534,10 @@ arp_mangle(const struct nf_hook_ops *ops,
         * addresses on different interfacs.  However, in the CLUSTERIP case
         * this wouldn't work, since we didn't subscribe the mcast group on
         * other interfaces */
-       if (c->dev != out) {
+       if (c->dev != state->out) {
                pr_debug("not mangling arp reply on different "
                         "interface: cip'%s'-skb'%s'\n",
-                        c->dev->name, out->name);
+                        c->dev->name, state->out->name);
                clusterip_config_put(c);
                return NF_ACCEPT;
        }
index 8f48f5517e336e61e3457379744fffd792816fcb..87907d4bd259a725cfeea6e4cbccb77be7995972 100644 (file)
@@ -34,31 +34,32 @@ static unsigned int
 reject_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct ipt_reject_info *reject = par->targinfo;
+       int hook = par->hooknum;
 
        switch (reject->with) {
        case IPT_ICMP_NET_UNREACHABLE:
-               nf_send_unreach(skb, ICMP_NET_UNREACH);
+               nf_send_unreach(skb, ICMP_NET_UNREACH, hook);
                break;
        case IPT_ICMP_HOST_UNREACHABLE:
-               nf_send_unreach(skb, ICMP_HOST_UNREACH);
+               nf_send_unreach(skb, ICMP_HOST_UNREACH, hook);
                break;
        case IPT_ICMP_PROT_UNREACHABLE:
-               nf_send_unreach(skb, ICMP_PROT_UNREACH);
+               nf_send_unreach(skb, ICMP_PROT_UNREACH, hook);
                break;
        case IPT_ICMP_PORT_UNREACHABLE:
-               nf_send_unreach(skb, ICMP_PORT_UNREACH);
+               nf_send_unreach(skb, ICMP_PORT_UNREACH, hook);
                break;
        case IPT_ICMP_NET_PROHIBITED:
-               nf_send_unreach(skb, ICMP_NET_ANO);
+               nf_send_unreach(skb, ICMP_NET_ANO, hook);
                break;
        case IPT_ICMP_HOST_PROHIBITED:
-               nf_send_unreach(skb, ICMP_HOST_ANO);
+               nf_send_unreach(skb, ICMP_HOST_ANO, hook);
                break;
        case IPT_ICMP_ADMIN_PROHIBITED:
-               nf_send_unreach(skb, ICMP_PKT_FILTERED);
+               nf_send_unreach(skb, ICMP_PKT_FILTERED, hook);
                break;
        case IPT_TCP_RESET:
-               nf_send_reset(skb, par->hooknum);
+               nf_send_reset(skb, hook);
        case IPT_ICMP_ECHOREPLY:
                /* Doesn't happen. */
                break;
index a313c3fbeb469e0594b2f7bccd788d687184bc61..e9e67793055fce9b20ee836275a3eb4a9437592a 100644 (file)
@@ -300,11 +300,9 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 
 static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
-                                      const struct net_device *in,
-                                      const struct net_device *out,
-                                      int (*okfn)(struct sk_buff *))
+                                      const struct nf_hook_state *nhs)
 {
-       struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out));
+       struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out));
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        struct nf_conn_synproxy *synproxy;
index e08a74a243a85d125ccbd043314f06c6b528b368..a0f3beca52d2107b12ae748a4328d5491e7553c3 100644 (file)
@@ -34,8 +34,7 @@ static const struct xt_table packet_filter = {
 
 static unsigned int
 iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                   const struct net_device *in, const struct net_device *out,
-                   int (*okfn)(struct sk_buff *))
+                   const struct nf_hook_state *state)
 {
        const struct net *net;
 
@@ -45,9 +44,8 @@ iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                /* root is playing with raw sockets. */
                return NF_ACCEPT;
 
-       net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, ops->hooknum, in, out,
-                           net->ipv4.iptable_filter);
+       net = dev_net(state->in ? state->in : state->out);
+       return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
index 6a5079c34bb363c34135e9bed5700a645f15b249..62cbb8c5f4a8f246428599186e0e9b498ae460a6 100644 (file)
@@ -37,8 +37,9 @@ static const struct xt_table packet_mangler = {
 };
 
 static unsigned int
-ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
+ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
 {
+       struct net_device *out = state->out;
        unsigned int ret;
        const struct iphdr *iph;
        u_int8_t tos;
@@ -58,7 +59,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
        daddr = iph->daddr;
        tos = iph->tos;
 
-       ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
+       ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, state,
                           dev_net(out)->ipv4.iptable_mangle);
        /* Reroute for ANY change. */
        if (ret != NF_DROP && ret != NF_STOLEN) {
@@ -81,18 +82,16 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
 static unsigned int
 iptable_mangle_hook(const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
-                    const struct net_device *in,
-                    const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
        if (ops->hooknum == NF_INET_LOCAL_OUT)
-               return ipt_mangle_out(skb, out);
+               return ipt_mangle_out(skb, state);
        if (ops->hooknum == NF_INET_POST_ROUTING)
-               return ipt_do_table(skb, ops->hooknum, in, out,
-                                   dev_net(out)->ipv4.iptable_mangle);
+               return ipt_do_table(skb, ops->hooknum, state,
+                                   dev_net(state->out)->ipv4.iptable_mangle);
        /* PREROUTING/INPUT/FORWARD: */
-       return ipt_do_table(skb, ops->hooknum, in, out,
-                           dev_net(in)->ipv4.iptable_mangle);
+       return ipt_do_table(skb, ops->hooknum, state,
+                           dev_net(state->in)->ipv4.iptable_mangle);
 }
 
 static struct nf_hook_ops *mangle_ops __read_mostly;
index 6b67d7e9a75d69e95d9d25f9b8524bc58d9ef607..0d4d9cdf98a4c0dcb2da3fbebe9264efeb99d4c7 100644 (file)
@@ -30,49 +30,40 @@ static const struct xt_table nf_nat_ipv4_table = {
 
 static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
+                                        const struct nf_hook_state *state,
                                         struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
 
-       return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.nat_table);
+       return ipt_do_table(skb, ops->hooknum, state, net->ipv4.nat_table);
 }
 
 static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_fn(ops, skb, in, out, iptable_nat_do_chain);
+       return nf_nat_ipv4_fn(ops, skb, state, iptable_nat_do_chain);
 }
 
 static unsigned int iptable_nat_ipv4_in(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_in(ops, skb, in, out, iptable_nat_do_chain);
+       return nf_nat_ipv4_in(ops, skb, state, iptable_nat_do_chain);
 }
 
 static unsigned int iptable_nat_ipv4_out(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
-                                        int (*okfn)(struct sk_buff *))
+                                        const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_out(ops, skb, in, out, iptable_nat_do_chain);
+       return nf_nat_ipv4_out(ops, skb, state, iptable_nat_do_chain);
 }
 
 static unsigned int iptable_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
                                              struct sk_buff *skb,
-                                             const struct net_device *in,
-                                             const struct net_device *out,
-                                             int (*okfn)(struct sk_buff *))
+                                             const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_local_fn(ops, skb, in, out, iptable_nat_do_chain);
+       return nf_nat_ipv4_local_fn(ops, skb, state, iptable_nat_do_chain);
 }
 
 static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
index b2f7e8f98316d2733e3936ead8dec20da2978f73..0356e6da4bb749ba1dcfa07667dcac7b0aa92878 100644 (file)
@@ -21,8 +21,7 @@ static const struct xt_table packet_raw = {
 /* The work comes in here from netfilter.c. */
 static unsigned int
 iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                const struct net_device *in, const struct net_device *out,
-                int (*okfn)(struct sk_buff *))
+                const struct nf_hook_state *state)
 {
        const struct net *net;
 
@@ -32,8 +31,8 @@ iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                /* root is playing with raw sockets. */
                return NF_ACCEPT;
 
-       net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.iptable_raw);
+       net = dev_net(state->in ? state->in : state->out);
+       return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
index c86647ed2078f660cf0e9f8b69957a6f4c79b1f6..4bce3980ccd935f891c55329c127478dc031ed77 100644 (file)
@@ -38,9 +38,7 @@ static const struct xt_table security_table = {
 
 static unsigned int
 iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                     const struct net_device *in,
-                     const struct net_device *out,
-                     int (*okfn)(struct sk_buff *))
+                     const struct nf_hook_state *state)
 {
        const struct net *net;
 
@@ -50,8 +48,8 @@ iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                /* Somebody is playing with raw sockets. */
                return NF_ACCEPT;
 
-       net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, ops->hooknum, in, out,
+       net = dev_net(state->in ? state->in : state->out);
+       return ipt_do_table(skb, ops->hooknum, state,
                            net->ipv4.iptable_security);
 }
 
index 5c61328b7704bc56001b5f65a4e9a8ade8ef3387..30ad9554b5e9931ad37329f0ffda6a8aacdf55d1 100644 (file)
@@ -94,9 +94,7 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
 
 static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
-                               const struct net_device *in,
-                               const struct net_device *out,
-                               int (*okfn)(struct sk_buff *))
+                               const struct nf_hook_state *state)
 {
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
@@ -123,9 +121,7 @@ static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
 
 static unsigned int ipv4_confirm(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
-                                const struct net_device *in,
-                                const struct net_device *out,
-                                int (*okfn)(struct sk_buff *))
+                                const struct nf_hook_state *state)
 {
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
@@ -149,24 +145,20 @@ out:
 
 static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
-       return nf_conntrack_in(dev_net(in), PF_INET, ops->hooknum, skb);
+       return nf_conntrack_in(dev_net(state->in), PF_INET, ops->hooknum, skb);
 }
 
 static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
-                                        int (*okfn)(struct sk_buff *))
+                                        const struct nf_hook_state *state)
 {
        /* root is playing with raw sockets. */
        if (skb->len < sizeof(struct iphdr) ||
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
-       return nf_conntrack_in(dev_net(out), PF_INET, ops->hooknum, skb);
+       return nf_conntrack_in(dev_net(state->out), PF_INET, ops->hooknum, skb);
 }
 
 /* Connection tracking may drop packets, but never alters them, so
@@ -322,8 +314,8 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
 static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
                                const struct nf_conntrack_tuple *tuple)
 {
-       if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
-           nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
+       if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
+           nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
                goto nla_put_failure;
        return 0;
 
@@ -342,8 +334,8 @@ static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
        if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST])
                return -EINVAL;
 
-       t->src.u3.ip = nla_get_be32(tb[CTA_IP_V4_SRC]);
-       t->dst.u3.ip = nla_get_be32(tb[CTA_IP_V4_DST]);
+       t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
+       t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
 
        return 0;
 }
index a460a87e14f890437a65a2434f6c6ea9fcb56c9d..f0dfe92a00d66a6a58301a94b238baa5cae32fb7 100644 (file)
@@ -300,7 +300,9 @@ static int exp_seq_show(struct seq_file *s, void *v)
                    __nf_ct_l3proto_find(exp->tuple.src.l3num),
                    __nf_ct_l4proto_find(exp->tuple.src.l3num,
                                         exp->tuple.dst.protonum));
-       return seq_putc(s, '\n');
+       seq_putc(s, '\n');
+
+       return 0;
 }
 
 static const struct seq_operations exp_seq_ops = {
index 7e5ca6f2d0cd57a7084cd9cd7386c9cdd40ac076..c88b7d4347187260e789612c44f34059092dc41d 100644 (file)
@@ -63,9 +63,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
 
 static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
-                                         int (*okfn)(struct sk_buff *))
+                                         const struct nf_hook_state *state)
 {
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(skb->sk);
index d059182c1466fabbc689d0c49715bc441b9c0f72..e7ad950cf9ef9a7f7b4e92ac146e1f92f6c20c19 100644 (file)
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/skbuff.h>
@@ -27,7 +29,7 @@ static struct nf_loginfo default_loginfo = {
        .type   = NF_LOG_TYPE_LOG,
        .u = {
                .log = {
-                       .level    = 5,
+                       .level    = LOGLEVEL_NOTICE,
                        .logflags = NF_LOG_MASK,
                },
        },
index 75101980eeee197a4f8413bbd7d29f4fd9e4bb74..076aadda04737eb7fa829adc14eb98ea37867655 100644 (file)
@@ -5,8 +5,10 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/skbuff.h>
@@ -26,7 +28,7 @@ static struct nf_loginfo default_loginfo = {
        .type   = NF_LOG_TYPE_LOG,
        .u = {
                .log = {
-                       .level    = 5,
+                       .level    = LOGLEVEL_NOTICE,
                        .logflags = NF_LOG_MASK,
                },
        },
index fc37711e11f38be40252086913ff5b913a40ed68..e59cc05c09e96c8f6996e5e0063c4d138d0dee11 100644 (file)
@@ -256,11 +256,10 @@ EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
 
 unsigned int
 nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-              const struct net_device *in, const struct net_device *out,
+              const struct nf_hook_state *state,
               unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
+                                       const struct nf_hook_state *state,
                                        struct nf_conn *ct))
 {
        struct nf_conn *ct;
@@ -309,7 +308,7 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                if (!nf_nat_initialized(ct, maniptype)) {
                        unsigned int ret;
 
-                       ret = do_chain(ops, skb, in, out, ct);
+                       ret = do_chain(ops, skb, state, ct);
                        if (ret != NF_ACCEPT)
                                return ret;
 
@@ -323,7 +322,8 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                        pr_debug("Already setup manip %s for ct %p\n",
                                 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct);
-                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat,
+                                              state->out))
                                goto oif_changed;
                }
                break;
@@ -332,7 +332,7 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                /* ESTABLISHED */
                NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
                             ctinfo == IP_CT_ESTABLISHED_REPLY);
-               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
                        goto oif_changed;
        }
 
@@ -346,17 +346,16 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn);
 
 unsigned int
 nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
-              const struct net_device *in, const struct net_device *out,
+              const struct nf_hook_state *state,
               unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
+                                        const struct nf_hook_state *state,
                                         struct nf_conn *ct))
 {
        unsigned int ret;
        __be32 daddr = ip_hdr(skb)->daddr;
 
-       ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            daddr != ip_hdr(skb)->daddr)
                skb_dst_drop(skb);
@@ -367,11 +366,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_in);
 
 unsigned int
 nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
-               const struct net_device *in, const struct net_device *out,
+               const struct nf_hook_state *state,
                unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
+                                         const struct nf_hook_state *state,
                                          struct nf_conn *ct))
 {
 #ifdef CONFIG_XFRM
@@ -386,7 +384,7 @@ nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
 #ifdef CONFIG_XFRM
        if (ret != NF_DROP && ret != NF_STOLEN &&
            !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
@@ -410,11 +408,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_out);
 
 unsigned int
 nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
+                    const struct nf_hook_state *state,
                     unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                               struct sk_buff *skb,
-                                              const struct net_device *in,
-                                              const struct net_device *out,
+                                              const struct nf_hook_state *state,
                                               struct nf_conn *ct))
 {
        const struct nf_conn *ct;
@@ -427,7 +424,7 @@ nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
index 536da7bc598ae9321224bb16c8d800571a163153..c5b794da51a91fd8b2236379b532910cafc0568f 100644 (file)
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);
 
 struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
                                  const struct sk_buff *oldskb,
-                                 __be16 protocol, int ttl)
+                                 __u8 protocol, int ttl)
 {
        struct iphdr *niph, *oiph = ip_hdr(oldskb);
 
@@ -164,4 +164,27 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
 }
 EXPORT_SYMBOL_GPL(nf_send_reset);
 
+void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
+{
+       struct iphdr *iph = ip_hdr(skb_in);
+       u8 proto;
+
+       if (skb_in->csum_bad || iph->frag_off & htons(IP_OFFSET))
+               return;
+
+       if (skb_csum_unnecessary(skb_in)) {
+               icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+               return;
+       }
+
+       if (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP)
+               proto = iph->protocol;
+       else
+               proto = 0;
+
+       if (nf_ip_checksum(skb_in, hook, ip_hdrlen(skb_in), proto) == 0)
+               icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+}
+EXPORT_SYMBOL_GPL(nf_send_unreach);
+
 MODULE_LICENSE("GPL");
index 19412a4063fbe27de956f4d7c380b8e774d61aae..8412268bbad1852851c6fc3d5337d463d1ff1c51 100644 (file)
 static unsigned int
 nft_do_chain_arp(const struct nf_hook_ops *ops,
                  struct sk_buff *skb,
-                 const struct net_device *in,
-                 const struct net_device *out,
-                 int (*okfn)(struct sk_buff *))
+                 const struct nf_hook_state *state)
 {
        struct nft_pktinfo pkt;
 
-       nft_set_pktinfo(&pkt, ops, skb, in, out);
+       nft_set_pktinfo(&pkt, ops, skb, state);
 
        return nft_do_chain(&pkt, ops);
 }
index 6820c8c40842170039cfad8645ca72d88508daa1..aa180d3a69a5a196e65fdc46cad267944ef6c5cc 100644 (file)
 
 static unsigned int nft_do_chain_ipv4(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
        struct nft_pktinfo pkt;
 
-       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
 
        return nft_do_chain(&pkt, ops);
 }
 
 static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
        if (unlikely(skb->len < sizeof(struct iphdr) ||
                     ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
@@ -45,7 +41,7 @@ static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
                return NF_ACCEPT;
        }
 
-       return nft_do_chain_ipv4(ops, skb, in, out, okfn);
+       return nft_do_chain_ipv4(ops, skb, state);
 }
 
 struct nft_af_info nft_af_ipv4 __read_mostly = {
index df547bf50078c4016c0e1924da77315cb1e6127a..bf5c30ae14e4e768b61dd758be2acef6e8a0aa86 100644 (file)
 
 static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
+                                     const struct nf_hook_state *state,
                                      struct nf_conn *ct)
 {
        struct nft_pktinfo pkt;
 
-       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
 
        return nft_do_chain(&pkt, ops);
 }
 
 static unsigned int nft_nat_ipv4_fn(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_fn(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv4_fn(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv4_in(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_in(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv4_in(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv4_out(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
-                                    const struct net_device *in,
-                                    const struct net_device *out,
-                                    int (*okfn)(struct sk_buff *))
+                                    const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_out(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv4_out(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
-                                         int (*okfn)(struct sk_buff *))
+                                         const struct nf_hook_state *state)
 {
-       return nf_nat_ipv4_local_fn(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv4_local_fn(ops, skb, state, nft_nat_do_chain);
 }
 
 static const struct nf_chain_type nft_chain_nat_ipv4 = {
index 125b66766c0a8799f0137b12352ab2448a0f9979..e335b0afdaf33405f05804e72bcadf48cac5c3ea 100644 (file)
@@ -23,9 +23,7 @@
 
 static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
        unsigned int ret;
        struct nft_pktinfo pkt;
@@ -39,7 +37,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
 
        mark = skb->mark;
        iph = ip_hdr(skb);
index d729542bd1b7901c174c62996b13110517a49032..16a5d4d73d7565807490074d9f4df6f0ab90b297 100644 (file)
@@ -27,7 +27,8 @@ static void nft_reject_ipv4_eval(const struct nft_expr *expr,
 
        switch (priv->type) {
        case NFT_REJECT_ICMP_UNREACH:
-               nf_send_unreach(pkt->skb, priv->icmp_code);
+               nf_send_unreach(pkt->skb, priv->icmp_code,
+                               pkt->ops->hooknum);
                break;
        case NFT_REJECT_TCP_RST:
                nf_send_reset(pkt->skb, pkt->ops->hooknum);
index 208d5439e59b2e8c3ccb2da46c292ad4f75b3784..a93f260cf24ca0a9d60346dc085eb51afdb43927 100644 (file)
@@ -64,11 +64,11 @@ EXPORT_SYMBOL_GPL(pingv6_ops);
 
 static u16 ping_port_rover;
 
-static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask)
+static inline u32 ping_hashfn(const struct net *net, u32 num, u32 mask)
 {
-       int res = (num + net_hash_mix(net)) & mask;
+       u32 res = (num + net_hash_mix(net)) & mask;
 
-       pr_debug("hash(%d) = %d\n", num, res);
+       pr_debug("hash(%u) = %u\n", num, res);
        return res;
 }
 EXPORT_SYMBOL_GPL(ping_hash);
@@ -516,7 +516,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
                 ntohs(icmph->un.echo.sequence));
 
        sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
-       if (sk == NULL) {
+       if (!sk) {
                pr_debug("no socket, dropping\n");
                return; /* No socket for error */
        }
@@ -692,8 +692,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
 }
 EXPORT_SYMBOL_GPL(ping_common_sendmsg);
 
-static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                          size_t len)
+static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        struct net *net = sock_net(sk);
        struct flowi4 fl4;
@@ -849,8 +848,8 @@ do_confirm:
        goto out;
 }
 
-int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                size_t len, int noblock, int flags, int *addr_len)
+int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+                int flags, int *addr_len)
 {
        struct inet_sock *isk = inet_sk(sk);
        int family = sk->sk_family;
@@ -972,7 +971,7 @@ bool ping_rcv(struct sk_buff *skb)
        skb_push(skb, skb->data - (u8 *)icmph);
 
        sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
-       if (sk != NULL) {
+       if (sk) {
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
                pr_debug("rcv on socket %p\n", sk);
index f027a708b7e01029574535e20f7461cfa4b84190..f2fc92a1241af90777c23f1bb6ded06c0f5868be 100644 (file)
@@ -46,7 +46,6 @@
 #include <linux/stddef.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
-#include <linux/aio.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
@@ -293,7 +292,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
 
        read_lock(&raw_v4_hashinfo.lock);
        raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
-       if (raw_sk != NULL) {
+       if (raw_sk) {
                iph = (const struct iphdr *)skb->data;
                net = dev_net(skb->dev);
 
@@ -363,7 +362,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
        skb = sock_alloc_send_skb(sk,
                                  length + hlen + tlen + 15,
                                  flags & MSG_DONTWAIT, &err);
-       if (skb == NULL)
+       if (!skb)
                goto error;
        skb_reserve(skb, hlen);
 
@@ -404,7 +403,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
                iph->check   = 0;
                iph->tot_len = htons(length);
                if (!iph->id)
-                       ip_select_ident(skb, NULL);
+                       ip_select_ident(net, skb, NULL);
 
                iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
        }
@@ -481,8 +480,7 @@ static int raw_getfrag(void *from, char *to, int offset, int len, int odd,
        return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
 }
 
-static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t len)
+static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct ipcm_cookie ipc;
@@ -709,8 +707,8 @@ out:        return ret;
  *     we return it, otherwise we block.
  */
 
-static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t len, int noblock, int flags, int *addr_len)
+static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+                      int noblock, int flags, int *addr_len)
 {
        struct inet_sock *inet = inet_sk(sk);
        size_t copied = 0;
@@ -873,7 +871,7 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
 
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
-               if (skb != NULL)
+               if (skb)
                        amount = skb->len;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return put_user(amount, (int __user *)arg);
index ad5064362c5c7da56e9e22f1a012972bfff58e0e..a78540f28276771e4c8f35024d3ee133c31317ab 100644 (file)
@@ -152,7 +152,6 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 
 static struct dst_ops ipv4_dst_ops = {
        .family =               AF_INET,
-       .protocol =             cpu_to_be16(ETH_P_IP),
        .check =                ipv4_dst_check,
        .default_advmss =       ipv4_default_advmss,
        .mtu =                  ipv4_mtu,
@@ -483,7 +482,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
 }
 EXPORT_SYMBOL(ip_idents_reserve);
 
-void __ip_select_ident(struct iphdr *iph, int segs)
+void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
 {
        static u32 ip_idents_hashrnd __read_mostly;
        u32 hash, id;
@@ -492,7 +491,7 @@ void __ip_select_ident(struct iphdr *iph, int segs)
 
        hash = jhash_3words((__force u32)iph->daddr,
                            (__force u32)iph->saddr,
-                           iph->protocol,
+                           iph->protocol ^ net_hash_mix(net),
                            ip_idents_hashrnd);
        id = ip_idents_reserve(hash, segs);
        iph->id = htons(id);
@@ -1057,7 +1056,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 
        rt = (struct rtable *)odst;
-       if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
+       if (odst->obsolete && !odst->ops->check(odst, 0)) {
                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
                if (IS_ERR(rt))
                        goto out;
@@ -1451,7 +1450,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
        /* Primary sanity checks. */
 
-       if (in_dev == NULL)
+       if (!in_dev)
                return -EINVAL;
 
        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
@@ -1554,7 +1553,7 @@ static int __mkroute_input(struct sk_buff *skb,
 
        /* get a working reference to the output device */
        out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
-       if (out_dev == NULL) {
+       if (!out_dev) {
                net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
                return -EINVAL;
        }
@@ -1592,7 +1591,7 @@ static int __mkroute_input(struct sk_buff *skb,
 
        fnhe = find_exception(&FIB_RES_NH(*res), daddr);
        if (do_cache) {
-               if (fnhe != NULL)
+               if (fnhe)
                        rth = rcu_dereference(fnhe->fnhe_rth_input);
                else
                        rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
@@ -2055,7 +2054,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
                     ipv4_is_lbcast(fl4->daddr))) {
                        /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
                        dev_out = __ip_dev_find(net, fl4->saddr, false);
-                       if (dev_out == NULL)
+                       if (!dev_out)
                                goto out;
 
                        /* Special hack: user can direct multicasts
@@ -2088,7 +2087,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
        if (fl4->flowi4_oif) {
                dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
                rth = ERR_PTR(-ENODEV);
-               if (dev_out == NULL)
+               if (!dev_out)
                        goto out;
 
                /* RACE: Check return value of inet_select_addr instead. */
@@ -2225,7 +2224,6 @@ static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
 
 static struct dst_ops ipv4_dst_blackhole_ops = {
        .family                 =       AF_INET,
-       .protocol               =       cpu_to_be16(ETH_P_IP),
        .check                  =       ipv4_blackhole_dst_check,
        .mtu                    =       ipv4_blackhole_mtu,
        .default_advmss         =       ipv4_default_advmss,
@@ -2301,7 +2299,7 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
        u32 metrics[RTAX_MAX];
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        r = nlmsg_data(nlh);
@@ -2321,11 +2319,11 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
        if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
                r->rtm_flags |= RTCF_DOREDIRECT;
 
-       if (nla_put_be32(skb, RTA_DST, dst))
+       if (nla_put_in_addr(skb, RTA_DST, dst))
                goto nla_put_failure;
        if (src) {
                r->rtm_src_len = 32;
-               if (nla_put_be32(skb, RTA_SRC, src))
+               if (nla_put_in_addr(skb, RTA_SRC, src))
                        goto nla_put_failure;
        }
        if (rt->dst.dev &&
@@ -2338,11 +2336,11 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
 #endif
        if (!rt_is_input_route(rt) &&
            fl4->saddr != src) {
-               if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
+               if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
                        goto nla_put_failure;
        }
        if (rt->rt_uses_gateway &&
-           nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
+           nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
                goto nla_put_failure;
 
        expires = rt->dst.expires;
@@ -2423,7 +2421,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        rtm = nlmsg_data(nlh);
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
-       if (skb == NULL) {
+       if (!skb) {
                err = -ENOBUFS;
                goto errout;
        }
@@ -2438,8 +2436,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        ip_hdr(skb)->protocol = IPPROTO_ICMP;
        skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
 
-       src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
-       dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
+       src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
+       dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
        iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
        mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
 
@@ -2454,7 +2452,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
                struct net_device *dev;
 
                dev = __dev_get_by_index(net, iif);
-               if (dev == NULL) {
+               if (!dev) {
                        err = -ENODEV;
                        goto errout_free;
                }
@@ -2653,7 +2651,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
        tbl = ipv4_route_flush_table;
        if (!net_eq(net, &init_net)) {
                tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
-               if (tbl == NULL)
+               if (!tbl)
                        goto err_dup;
 
                /* Don't export sysctls to unprivileged users */
@@ -2663,7 +2661,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
        tbl[0].extra1 = net;
 
        net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
-       if (net->ipv4.route_hdr == NULL)
+       if (!net->ipv4.route_hdr)
                goto err_reg;
        return 0;
 
index 45fe60c5238e99d0a639ecf4698cddab141f0fdd..df849e5a10f1d7f41fb3353a26356ede81f56f72 100644 (file)
@@ -219,19 +219,20 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 }
 EXPORT_SYMBOL_GPL(__cookie_v4_check);
 
-static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
-                                          struct request_sock *req,
-                                          struct dst_entry *dst)
+static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+                                   struct request_sock *req,
+                                   struct dst_entry *dst)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sock *child;
 
        child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
-       if (child)
+       if (child) {
+               atomic_set(&req->rsk_refcnt, 1);
                inet_csk_reqsk_queue_add(sk, req, child);
-       else
+       } else {
                reqsk_free(req);
-
+       }
        return child;
 }
 
@@ -325,7 +326,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
                goto out;
 
        ret = NULL;
-       req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
+       req = inet_reqsk_alloc(&tcp_request_sock_ops, sk); /* for safety */
        if (!req)
                goto out;
 
@@ -336,8 +337,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
        req->mss                = mss;
        ireq->ir_num            = ntohs(th->dest);
        ireq->ir_rmt_port       = th->source;
-       ireq->ir_loc_addr       = ip_hdr(skb)->daddr;
-       ireq->ir_rmt_addr       = ip_hdr(skb)->saddr;
+       sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+       sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
        ireq->ir_mark           = inet_request_mark(sk, skb);
        ireq->snd_wscale        = tcp_opt.snd_wscale;
        ireq->sack_ok           = tcp_opt.sack_ok;
@@ -345,7 +346,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
        ireq->tstamp_ok         = tcp_opt.saw_tstamp;
        req->ts_recent          = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
        treq->snt_synack        = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
-       treq->listener          = NULL;
+       treq->tfo_listener      = false;
+
+       ireq->ir_iif = sk->sk_bound_dev_if;
 
        /* We throwed the options of the initial SYN away, so we hope
         * the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -357,7 +360,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
                goto out;
        }
 
-       req->expires    = 0UL;
        req->num_retrans = 0;
 
        /*
index d151539da8e6948571bfdfbc105c838b3b561d71..c3852a7ff3c7630f4114cbc33a51a35fa3645e8c 100644 (file)
@@ -883,6 +883,20 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "tcp_probe_threshold",
+               .data           = &init_net.ipv4.sysctl_tcp_probe_threshold,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "tcp_probe_interval",
+               .data           = &init_net.ipv4.sysctl_tcp_probe_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        { }
 };
 
@@ -895,7 +909,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                int i;
 
                table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
-               if (table == NULL)
+               if (!table)
                        goto err_alloc;
 
                /* Update the variables to point into the current struct net */
@@ -904,7 +918,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
        }
 
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
-       if (net->ipv4.ipv4_hdr == NULL)
+       if (!net->ipv4.ipv4_hdr)
                goto err_reg;
 
        net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
@@ -942,7 +956,7 @@ static __init int sysctl_ipv4_init(void)
        struct ctl_table_header *hdr;
 
        hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
-       if (hdr == NULL)
+       if (!hdr)
                return -ENOMEM;
 
        if (register_pernet_subsys(&ipv4_sysctl_ops)) {
index 995a2259bcfc80894caec08fe2e7ccd62311e227..18e3a12eb1b283bd370bdb4c16f5969e30bcec15 100644 (file)
@@ -496,7 +496,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
        /* Connected or passive Fast Open socket? */
        if (sk->sk_state != TCP_SYN_SENT &&
-           (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
+           (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
                int target = sock_rcvlowat(sk, 0, INT_MAX);
 
                if (tp->urg_seq == tp->copied_seq &&
@@ -1028,7 +1028,7 @@ static inline int select_size(const struct sock *sk, bool sg)
 
 void tcp_free_fastopen_req(struct tcp_sock *tp)
 {
-       if (tp->fastopen_req != NULL) {
+       if (tp->fastopen_req) {
                kfree(tp->fastopen_req);
                tp->fastopen_req = NULL;
        }
@@ -1042,12 +1042,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 
        if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
                return -EOPNOTSUPP;
-       if (tp->fastopen_req != NULL)
+       if (tp->fastopen_req)
                return -EALREADY; /* Another Fast Open is in progress */
 
        tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
                                   sk->sk_allocation);
-       if (unlikely(tp->fastopen_req == NULL))
+       if (unlikely(!tp->fastopen_req))
                return -ENOBUFS;
        tp->fastopen_req->data = msg;
        tp->fastopen_req->size = size;
@@ -1060,8 +1060,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
        return err;
 }
 
-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-               size_t size)
+int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -1120,7 +1119,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        sg = !!(sk->sk_route_caps & NETIF_F_SG);
 
-       while (iov_iter_count(&msg->msg_iter)) {
+       while (msg_data_left(msg)) {
                int copy = 0;
                int max = size_goal;
 
@@ -1164,8 +1163,8 @@ new_segment:
                }
 
                /* Try to append data to the end of skb. */
-               if (copy > iov_iter_count(&msg->msg_iter))
-                       copy = iov_iter_count(&msg->msg_iter);
+               if (copy > msg_data_left(msg))
+                       copy = msg_data_left(msg);
 
                /* Where to copy to? */
                if (skb_availroom(skb) > 0) {
@@ -1222,7 +1221,7 @@ new_segment:
                tcp_skb_pcount_set(skb, 0);
 
                copied += copy;
-               if (!iov_iter_count(&msg->msg_iter)) {
+               if (!msg_data_left(msg)) {
                        tcp_tx_timestamp(sk, skb);
                        goto out;
                }
@@ -1539,8 +1538,8 @@ EXPORT_SYMBOL(tcp_read_sock);
  *     Probably, code can be easily improved even more.
  */
 
-int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-               size_t len, int nonblock, int flags, int *addr_len)
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+               int flags, int *addr_len)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int copied = 0;
@@ -1914,18 +1913,19 @@ EXPORT_SYMBOL_GPL(tcp_set_state);
 
 static const unsigned char new_state[16] = {
   /* current state:        new state:      action:     */
-  /* (Invalid)         */ TCP_CLOSE,
-  /* TCP_ESTABLISHED   */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
-  /* TCP_SYN_SENT      */ TCP_CLOSE,
-  /* TCP_SYN_RECV      */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
-  /* TCP_FIN_WAIT1     */ TCP_FIN_WAIT1,
-  /* TCP_FIN_WAIT2     */ TCP_FIN_WAIT2,
-  /* TCP_TIME_WAIT     */ TCP_CLOSE,
-  /* TCP_CLOSE         */ TCP_CLOSE,
-  /* TCP_CLOSE_WAIT    */ TCP_LAST_ACK  | TCP_ACTION_FIN,
-  /* TCP_LAST_ACK      */ TCP_LAST_ACK,
-  /* TCP_LISTEN                */ TCP_CLOSE,
-  /* TCP_CLOSING       */ TCP_CLOSING,
+  [0 /* (Invalid) */]  = TCP_CLOSE,
+  [TCP_ESTABLISHED]    = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
+  [TCP_SYN_SENT]       = TCP_CLOSE,
+  [TCP_SYN_RECV]       = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
+  [TCP_FIN_WAIT1]      = TCP_FIN_WAIT1,
+  [TCP_FIN_WAIT2]      = TCP_FIN_WAIT2,
+  [TCP_TIME_WAIT]      = TCP_CLOSE,
+  [TCP_CLOSE]          = TCP_CLOSE,
+  [TCP_CLOSE_WAIT]     = TCP_LAST_ACK  | TCP_ACTION_FIN,
+  [TCP_LAST_ACK]       = TCP_LAST_ACK,
+  [TCP_LISTEN]         = TCP_CLOSE,
+  [TCP_CLOSING]                = TCP_CLOSING,
+  [TCP_NEW_SYN_RECV]   = TCP_CLOSE,    /* should not happen ! */
 };
 
 static int tcp_close_state(struct sock *sk)
@@ -2138,7 +2138,7 @@ adjudge_to_death:
                 * aborted (e.g., closed with unread data) before 3WHS
                 * finishes.
                 */
-               if (req != NULL)
+               if (req)
                        reqsk_fastopen_remove(sk, req, false);
                inet_csk_destroy_sock(sk);
        }
@@ -2776,7 +2776,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                break;
 
        case TCP_FASTOPEN:
-               if (icsk->icsk_accept_queue.fastopenq != NULL)
+               if (icsk->icsk_accept_queue.fastopenq)
                        val = icsk->icsk_accept_queue.fastopenq->max_qlen;
                else
                        val = 0;
@@ -2960,7 +2960,7 @@ void tcp_done(struct sock *sk)
 
        tcp_set_state(sk, TCP_CLOSE);
        tcp_clear_xmit_timers(sk);
-       if (req != NULL)
+       if (req)
                reqsk_fastopen_remove(sk, req, false);
 
        sk->sk_shutdown = SHUTDOWN_MASK;
@@ -3001,12 +3001,11 @@ static void __init tcp_init_mem(void)
 
 void __init tcp_init(void)
 {
-       struct sk_buff *skb = NULL;
        unsigned long limit;
        int max_rshare, max_wshare, cnt;
        unsigned int i;
 
-       BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
+       sock_skb_cb_check_size(sizeof(struct tcp_skb_cb));
 
        percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
        percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
index 62856e185a935e44deb26de59ff94c8bf7500579..7a5ae50c80c87add1e46e8255f0837796d2e4947 100644 (file)
@@ -83,7 +83,7 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
                ret = -EEXIST;
        } else {
                list_add_tail_rcu(&ca->list, &tcp_cong_list);
-               pr_info("%s registered\n", ca->name);
+               pr_debug("%s registered\n", ca->name);
        }
        spin_unlock(&tcp_cong_list_lock);
 
index 0d73f9ddb55b8d55204643541dad651cd9d6eb06..79b34a0f4a4ae519c3f66c511c989b92fca02b09 100644 (file)
@@ -29,18 +29,18 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
                r->idiag_wqueue = tp->write_seq - tp->snd_una;
        }
-       if (info != NULL)
+       if (info)
                tcp_get_info(sk, info);
 }
 
 static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
-                         struct inet_diag_req_v2 *r, struct nlattr *bc)
+                         const struct inet_diag_req_v2 *r, struct nlattr *bc)
 {
        inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
 }
 
 static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
-                            struct inet_diag_req_v2 *req)
+                            const struct inet_diag_req_v2 *req)
 {
        return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
 }
index ea82fd492c1bf4788ffb5dce8275f653d066b2e4..5da55e2b5cd22941b29abde6ef2993b79d97a848 100644 (file)
@@ -141,7 +141,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
        req->sk = NULL;
 
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-       if (child == NULL)
+       if (!child)
                return false;
 
        spin_lock(&queue->fastopenq->lock);
@@ -155,12 +155,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
        tp = tcp_sk(child);
 
        tp->fastopen_rsk = req;
-       /* Do a hold on the listner sk so that if the listener is being
-        * closed, the child that has been accepted can live on and still
-        * access listen_lock.
-        */
-       sock_hold(sk);
-       tcp_rsk(req)->listener = sk;
+       tcp_rsk(req)->tfo_listener = true;
 
        /* RFC1323: The window in SYN & SYN/ACK segments is never
         * scaled. So correct it appropriately.
@@ -174,6 +169,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 
+       atomic_set(&req->rsk_refcnt, 1);
        /* Add the child socket directly into the accept queue */
        inet_csk_reqsk_queue_add(sk, req, child);
 
@@ -218,10 +214,9 @@ static bool tcp_fastopen_create_child(struct sock *sk,
        sk->sk_data_ready(sk);
        bh_unlock_sock(child);
        sock_put(child);
-       WARN_ON(req->sk == NULL);
+       WARN_ON(!req->sk);
        return true;
 }
-EXPORT_SYMBOL(tcp_fastopen_create_child);
 
 static bool tcp_fastopen_queue_check(struct sock *sk)
 {
@@ -238,14 +233,14 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
         * temporarily vs a server not supporting Fast Open at all.
         */
        fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
-       if (fastopenq == NULL || fastopenq->max_qlen == 0)
+       if (!fastopenq || fastopenq->max_qlen == 0)
                return false;
 
        if (fastopenq->qlen >= fastopenq->max_qlen) {
                struct request_sock *req1;
                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
-               if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
+               if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
                        spin_unlock(&fastopenq->lock);
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
@@ -254,7 +249,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
                fastopenq->rskq_rst_head = req1->dl_next;
                fastopenq->qlen--;
                spin_unlock(&fastopenq->lock);
-               reqsk_free(req1);
+               reqsk_put(req1);
        }
        return true;
 }
index fb4cf8b8e121acd4bffcf2fdfbd7e03c76bad7cc..c1ce304ba8d2b63930a8e70d11bd3b83370c134c 100644 (file)
@@ -866,7 +866,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 /* This must be called before lost_out is incremented */
 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 {
-       if ((tp->retransmit_skb_hint == NULL) ||
+       if (!tp->retransmit_skb_hint ||
            before(TCP_SKB_CB(skb)->seq,
                   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
                tp->retransmit_skb_hint = skb;
@@ -1256,7 +1256,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
                fack_count += pcount;
 
                /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
-               if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+               if (!tcp_is_fack(tp) && tp->lost_skb_hint &&
                    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
                        tp->lost_cnt_hint += pcount;
 
@@ -1535,7 +1535,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                if (!before(TCP_SKB_CB(skb)->seq, end_seq))
                        break;
 
-               if ((next_dup != NULL) &&
+               if (next_dup  &&
                    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
                        in_sack = tcp_match_skb_to_sack(sk, skb,
                                                        next_dup->start_seq,
@@ -1551,7 +1551,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                if (in_sack <= 0) {
                        tmp = tcp_shift_skb_data(sk, skb, state,
                                                 start_seq, end_seq, dup_sack);
-                       if (tmp != NULL) {
+                       if (tmp) {
                                if (tmp != skb) {
                                        skb = tmp;
                                        continue;
@@ -1614,7 +1614,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
                                                struct tcp_sacktag_state *state,
                                                u32 skip_to_seq)
 {
-       if (next_dup == NULL)
+       if (!next_dup)
                return skb;
 
        if (before(next_dup->start_seq, skip_to_seq)) {
@@ -1783,7 +1783,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                        if (tcp_highest_sack_seq(tp) == cache->end_seq) {
                                /* ...but better entrypoint exists! */
                                skb = tcp_highest_sack(sk);
-                               if (skb == NULL)
+                               if (!skb)
                                        break;
                                state.fack_count = tp->fackets_out;
                                cache++;
@@ -1798,7 +1798,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
                if (!before(start_seq, tcp_highest_sack_seq(tp))) {
                        skb = tcp_highest_sack(sk);
-                       if (skb == NULL)
+                       if (!skb)
                                break;
                        state.fack_count = tp->fackets_out;
                }
@@ -3105,10 +3105,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                        if (!first_ackt.v64)
                                first_ackt = last_ackt;
 
-                       if (!(sacked & TCPCB_SACKED_ACKED))
+                       if (!(sacked & TCPCB_SACKED_ACKED)) {
                                reord = min(pkts_acked, reord);
-                       if (!after(scb->end_seq, tp->high_seq))
-                               flag |= FLAG_ORIG_SACK_ACKED;
+                               if (!after(scb->end_seq, tp->high_seq))
+                                       flag |= FLAG_ORIG_SACK_ACKED;
+                       }
                }
 
                if (sacked & TCPCB_SACKED_ACKED)
@@ -3321,6 +3322,36 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
        return flag;
 }
 
+/* Return true if we're currently rate-limiting out-of-window ACKs and
+ * thus shouldn't send a dupack right now. We rate-limit dupacks in
+ * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
+ * attacks that send repeated SYNs or ACKs for the same connection. To
+ * do this, we do not send a duplicate SYNACK or ACK if the remote
+ * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
+ */
+bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+                         int mib_idx, u32 *last_oow_ack_time)
+{
+       /* Data packets without SYNs are not likely part of an ACK loop. */
+       if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
+           !tcp_hdr(skb)->syn)
+               goto not_rate_limited;
+
+       if (*last_oow_ack_time) {
+               s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+               if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+                       NET_INC_STATS_BH(net, mib_idx);
+                       return true;    /* rate-limited: don't send yet! */
+               }
+       }
+
+       *last_oow_ack_time = tcp_time_stamp;
+
+not_rate_limited:
+       return false;   /* not rate-limited: go ahead, send dupack now! */
+}
+
 /* RFC 5961 7 [ACK Throttling] */
 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 {
@@ -3668,7 +3699,7 @@ void tcp_parse_options(const struct sk_buff *skb,
                                 */
                                if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
                                    get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
-                                   foc == NULL || !th->syn || (opsize & 1))
+                                   !foc || !th->syn || (opsize & 1))
                                        break;
                                foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
                                if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
@@ -4639,7 +4670,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
        struct sk_buff *head;
        u32 start, end;
 
-       if (skb == NULL)
+       if (!skb)
                return;
 
        start = TCP_SKB_CB(skb)->seq;
@@ -5094,7 +5125,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (unlikely(sk->sk_rx_dst == NULL))
+       if (unlikely(!sk->sk_rx_dst))
                inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
        /*
         *      Header prediction.
@@ -5291,7 +5322,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 
        tcp_set_state(sk, TCP_ESTABLISHED);
 
-       if (skb != NULL) {
+       if (skb) {
                icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
                security_inet_conn_established(sk, skb);
        }
@@ -5660,11 +5691,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        }
 
        req = tp->fastopen_rsk;
-       if (req != NULL) {
+       if (req) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                    sk->sk_state != TCP_FIN_WAIT1);
 
-               if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
+               if (!tcp_check_req(sk, skb, req, true))
                        goto discard;
        }
 
@@ -5750,7 +5781,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                 * ACK we have received, this would have acknowledged
                 * our SYNACK so stop the SYNACK timer.
                 */
-               if (req != NULL) {
+               if (req) {
                        /* Return RST if ack_seq is invalid.
                         * Note that RFC793 only says to generate a
                         * DUPACK for it but for TCP Fast Open it seems
@@ -5912,6 +5943,80 @@ static void tcp_ecn_create_request(struct request_sock *req,
                inet_rsk(req)->ecn_ok = 1;
 }
 
+static void tcp_openreq_init(struct request_sock *req,
+                            const struct tcp_options_received *rx_opt,
+                            struct sk_buff *skb, const struct sock *sk)
+{
+       struct inet_request_sock *ireq = inet_rsk(req);
+
+       req->rcv_wnd = 0;               /* So that tcp_send_synack() knows! */
+       req->cookie_ts = 0;
+       tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
+       tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+       tcp_rsk(req)->snt_synack = tcp_time_stamp;
+       tcp_rsk(req)->last_oow_ack_time = 0;
+       req->mss = rx_opt->mss_clamp;
+       req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
+       ireq->tstamp_ok = rx_opt->tstamp_ok;
+       ireq->sack_ok = rx_opt->sack_ok;
+       ireq->snd_wscale = rx_opt->snd_wscale;
+       ireq->wscale_ok = rx_opt->wscale_ok;
+       ireq->acked = 0;
+       ireq->ecn_ok = 0;
+       ireq->ir_rmt_port = tcp_hdr(skb)->source;
+       ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
+       ireq->ir_mark = inet_request_mark(sk, skb);
+}
+
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+                                     struct sock *sk_listener)
+{
+       struct request_sock *req = reqsk_alloc(ops, sk_listener);
+
+       if (req) {
+               struct inet_request_sock *ireq = inet_rsk(req);
+
+               kmemcheck_annotate_bitfield(ireq, flags);
+               ireq->opt = NULL;
+               atomic64_set(&ireq->ir_cookie, 0);
+               ireq->ireq_state = TCP_NEW_SYN_RECV;
+               write_pnet(&ireq->ireq_net, sock_net(sk_listener));
+               ireq->ireq_family = sk_listener->sk_family;
+       }
+
+       return req;
+}
+EXPORT_SYMBOL(inet_reqsk_alloc);
+
+/*
+ * Return true if a syncookie should be sent
+ */
+static bool tcp_syn_flood_action(struct sock *sk,
+                                const struct sk_buff *skb,
+                                const char *proto)
+{
+       const char *msg = "Dropping request";
+       bool want_cookie = false;
+       struct listen_sock *lopt;
+
+#ifdef CONFIG_SYN_COOKIES
+       if (sysctl_tcp_syncookies) {
+               msg = "Sending cookies";
+               want_cookie = true;
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+       } else
+#endif
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
+
+       lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+       if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
+               lopt->synflood_warned = 1;
+               pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
+                       proto, ntohs(tcp_hdr(skb)->dest), msg);
+       }
+       return want_cookie;
+}
+
 int tcp_conn_request(struct request_sock_ops *rsk_ops,
                     const struct tcp_request_sock_ops *af_ops,
                     struct sock *sk, struct sk_buff *skb)
@@ -5949,7 +6054,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                goto drop;
        }
 
-       req = inet_reqsk_alloc(rsk_ops);
+       req = inet_reqsk_alloc(rsk_ops, sk);
        if (!req)
                goto drop;
 
@@ -5966,6 +6071,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
        tcp_openreq_init(req, &tmp_opt, skb, sk);
 
+       /* Note: tcp_v6_init_req() might override ir_iif for link locals */
+       inet_rsk(req)->ir_iif = sk->sk_bound_dev_if;
+
        af_ops->init_req(req, sk, skb);
 
        if (security_inet_conn_request(sk, skb, req))
@@ -6038,7 +6146,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                if (err || want_cookie)
                        goto drop_and_free;
 
-               tcp_rsk(req)->listener = NULL;
+               tcp_rsk(req)->tfo_listener = false;
                af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        }
 
index f1756ee022078d12e74be5d245fc1a6a0c1c4a34..560f9571f7c43957b5996957ebed5f093fff6d75 100644 (file)
@@ -122,7 +122,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
           and use initial timestamp retrieved from peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
-           (twp == NULL || (sysctl_tcp_tw_reuse &&
+           (!twp || (sysctl_tcp_tw_reuse &&
                             get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
@@ -189,7 +189,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        if (!inet->inet_saddr)
                inet->inet_saddr = fl4->saddr;
-       inet->inet_rcv_saddr = inet->inet_saddr;
+       sk_rcv_saddr_set(sk, inet->inet_saddr);
 
        if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
                /* Reset inherited state */
@@ -204,7 +204,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                tcp_fetch_timewait_stamp(sk, &rt->dst);
 
        inet->inet_dport = usin->sin_port;
-       inet->inet_daddr = daddr;
+       sk_daddr_set(sk, daddr);
 
        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
@@ -310,6 +310,34 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
                dst->ops->redirect(dst, sk, skb);
 }
 
+
+/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
+void tcp_req_err(struct sock *sk, u32 seq)
+{
+       struct request_sock *req = inet_reqsk(sk);
+       struct net *net = sock_net(sk);
+
+       /* ICMPs are not backlogged, hence we cannot get
+        * an established socket here.
+        */
+       WARN_ON(req->sk);
+
+       if (seq != tcp_rsk(req)->snt_isn) {
+               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               reqsk_put(req);
+       } else {
+               /*
+                * Still in SYN_RECV, just remove it silently.
+                * There is no good way to pass the error to the newly
+                * created socket, and POSIX does not want network
+                * errors returned from accept().
+                */
+               NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
+               inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+       }
+}
+EXPORT_SYMBOL(tcp_req_err);
+
 /*
  * This routine is called by the ICMP module when it gets some
  * sort of error condition.  If err < 0 then the socket should
@@ -343,8 +371,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        int err;
        struct net *net = dev_net(icmp_skb->dev);
 
-       sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
-                       iph->saddr, th->source, inet_iif(icmp_skb));
+       sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
+                                      th->dest, iph->saddr, ntohs(th->source),
+                                      inet_iif(icmp_skb));
        if (!sk) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
@@ -353,6 +382,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                inet_twsk_put(inet_twsk(sk));
                return;
        }
+       seq = ntohl(th->seq);
+       if (sk->sk_state == TCP_NEW_SYN_RECV)
+               return tcp_req_err(sk, seq);
 
        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
@@ -374,7 +406,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 
        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
-       seq = ntohl(th->seq);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
        fastopen = tp->fastopen_rsk;
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
@@ -458,42 +489,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        }
 
        switch (sk->sk_state) {
-               struct request_sock *req, **prev;
-       case TCP_LISTEN:
-               if (sock_owned_by_user(sk))
-                       goto out;
-
-               req = inet_csk_search_req(sk, &prev, th->dest,
-                                         iph->daddr, iph->saddr);
-               if (!req)
-                       goto out;
-
-               /* ICMPs are not backlogged, hence we cannot get
-                  an established socket here.
-                */
-               WARN_ON(req->sk);
-
-               if (seq != tcp_rsk(req)->snt_isn) {
-                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-                       goto out;
-               }
-
-               /*
-                * Still in SYN_RECV, just remove it silently.
-                * There is no good way to pass the error to the newly
-                * created socket, and POSIX does not want network
-                * errors returned from accept().
-                */
-               inet_csk_reqsk_queue_drop(sk, req, prev);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
-               goto out;
-
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket is
                 * is already accepted it is treated as a connected one below.
                 */
-               if (fastopen && fastopen->sk == NULL)
+               if (fastopen && !fastopen->sk)
                        break;
 
                if (!sock_owned_by_user(sk)) {
@@ -647,7 +648,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
                if (!key)
                        goto release_sk1;
 
-               genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
+               genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto release_sk1;
        } else {
@@ -855,35 +856,6 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
        kfree(inet_rsk(req)->opt);
 }
 
-/*
- * Return true if a syncookie should be sent
- */
-bool tcp_syn_flood_action(struct sock *sk,
-                        const struct sk_buff *skb,
-                        const char *proto)
-{
-       const char *msg = "Dropping request";
-       bool want_cookie = false;
-       struct listen_sock *lopt;
-
-#ifdef CONFIG_SYN_COOKIES
-       if (sysctl_tcp_syncookies) {
-               msg = "Sending cookies";
-               want_cookie = true;
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
-       } else
-#endif
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
-
-       lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
-       if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
-               lopt->synflood_warned = 1;
-               pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
-                       proto, ntohs(tcp_hdr(skb)->dest), msg);
-       }
-       return want_cookie;
-}
-EXPORT_SYMBOL(tcp_syn_flood_action);
 
 #ifdef CONFIG_TCP_MD5SIG
 /*
@@ -897,10 +869,10 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
                                         const union tcp_md5_addr *addr,
                                         int family)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
        unsigned int size = sizeof(struct in_addr);
-       struct tcp_md5sig_info *md5sig;
+       const struct tcp_md5sig_info *md5sig;
 
        /* caller either holds rcu_read_lock() or socket lock */
        md5sig = rcu_dereference_check(tp->md5sig_info,
@@ -923,24 +895,15 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 EXPORT_SYMBOL(tcp_md5_do_lookup);
 
 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
-                                        struct sock *addr_sk)
+                                        const struct sock *addr_sk)
 {
        union tcp_md5_addr *addr;
 
-       addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
+       addr = (union tcp_md5_addr *)&sk->sk_daddr;
        return tcp_md5_do_lookup(sk, addr, AF_INET);
 }
 EXPORT_SYMBOL(tcp_v4_md5_lookup);
 
-static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
-                                                     struct request_sock *req)
-{
-       union tcp_md5_addr *addr;
-
-       addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
-       return tcp_md5_do_lookup(sk, addr, AF_INET);
-}
-
 /* This can be called on a newly created socket, from other files */
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
@@ -1101,8 +1064,8 @@ clear_hash_noput:
        return 1;
 }
 
-int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-                       const struct sock *sk, const struct request_sock *req,
+int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
+                       const struct sock *sk,
                        const struct sk_buff *skb)
 {
        struct tcp_md5sig_pool *hp;
@@ -1110,12 +1073,9 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 saddr, daddr;
 
-       if (sk) {
-               saddr = inet_sk(sk)->inet_saddr;
-               daddr = inet_sk(sk)->inet_daddr;
-       } else if (req) {
-               saddr = inet_rsk(req)->ir_loc_addr;
-               daddr = inet_rsk(req)->ir_rmt_addr;
+       if (sk) { /* valid for establish/request sockets */
+               saddr = sk->sk_rcv_saddr;
+               daddr = sk->sk_daddr;
        } else {
                const struct iphdr *iph = ip_hdr(skb);
                saddr = iph->saddr;
@@ -1152,8 +1112,9 @@ clear_hash_noput:
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
-                                     const struct sk_buff *skb)
+/* Called with rcu_read_lock() */
+static bool tcp_v4_inbound_md5_hash(struct sock *sk,
+                                   const struct sk_buff *skb)
 {
        /*
         * This gets called for each TCP segment that arrives
@@ -1193,7 +1154,7 @@ static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
         */
        genhash = tcp_v4_md5_hash_skb(newhash,
                                      hash_expected,
-                                     NULL, NULL, skb);
+                                     NULL, skb);
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
@@ -1205,28 +1166,16 @@ static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
        }
        return false;
 }
-
-static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
-{
-       bool ret;
-
-       rcu_read_lock();
-       ret = __tcp_v4_inbound_md5_hash(sk, skb);
-       rcu_read_unlock();
-
-       return ret;
-}
-
 #endif
 
-static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
+static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
                            struct sk_buff *skb)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
 
-       ireq->ir_loc_addr = ip_hdr(skb)->daddr;
-       ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
-       ireq->no_srccheck = inet_sk(sk)->transparent;
+       sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+       sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+       ireq->no_srccheck = inet_sk(sk_listener)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
 }
 
@@ -1259,7 +1208,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
        .mss_clamp      =       TCP_MSS_DEFAULT,
 #ifdef CONFIG_TCP_MD5SIG
-       .md5_lookup     =       tcp_v4_reqsk_md5_lookup,
+       .req_md5_lookup =       tcp_v4_md5_lookup,
        .calc_md5_hash  =       tcp_v4_md5_hash_skb,
 #endif
        .init_req       =       tcp_v4_init_req,
@@ -1318,8 +1267,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newtp                 = tcp_sk(newsk);
        newinet               = inet_sk(newsk);
        ireq                  = inet_rsk(req);
-       newinet->inet_daddr   = ireq->ir_rmt_addr;
-       newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+       sk_daddr_set(newsk, ireq->ir_rmt_addr);
+       sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
        newinet->inet_saddr           = ireq->ir_loc_addr;
        inet_opt              = ireq->opt;
        rcu_assign_pointer(newinet->inet_opt, inet_opt);
@@ -1356,7 +1305,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        /* Copy over the MD5 key from the original socket */
        key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
                                AF_INET);
-       if (key != NULL) {
+       if (key) {
                /*
                 * We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
@@ -1391,15 +1340,17 @@ EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
 
 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = tcp_hdr(skb);
+       const struct tcphdr *th = tcp_hdr(skb);
        const struct iphdr *iph = ip_hdr(skb);
+       struct request_sock *req;
        struct sock *nsk;
-       struct request_sock **prev;
-       /* Find possible connection requests. */
-       struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
-                                                      iph->saddr, iph->daddr);
-       if (req)
-               return tcp_check_req(sk, skb, req, prev, false);
+
+       req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
+       if (req) {
+               nsk = tcp_check_req(sk, skb, req, false);
+               reqsk_put(req);
+               return nsk;
+       }
 
        nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
                        th->source, iph->daddr, th->dest, inet_iif(skb));
@@ -1439,7 +1390,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
                sk_mark_napi_id(sk, skb);
                if (dst) {
                        if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
-                           dst->ops->check(dst, 0) == NULL) {
+                           !dst->ops->check(dst, 0)) {
                                dst_release(dst);
                                sk->sk_rx_dst = NULL;
                        }
@@ -1517,7 +1468,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
        if (sk) {
                skb->sk = sk;
                skb->destructor = sock_edemux;
-               if (sk->sk_state != TCP_TIME_WAIT) {
+               if (sk_fullsock(sk)) {
                        struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
 
                        if (dst)
@@ -1846,7 +1797,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(sk);
 
-       BUG_ON(tp->fastopen_rsk != NULL);
+       BUG_ON(tp->fastopen_rsk);
 
        /* If socket is aborted during connect operation */
        tcp_free_fastopen_req(tp);
@@ -1904,13 +1855,13 @@ get_req:
                }
                sk        = sk_nulls_next(st->syn_wait_sk);
                st->state = TCP_SEQ_STATE_LISTENING;
-               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        } else {
                icsk = inet_csk(sk);
-               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                if (reqsk_queue_len(&icsk->icsk_accept_queue))
                        goto start_req;
-               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                sk = sk_nulls_next(sk);
        }
 get_sk:
@@ -1922,7 +1873,7 @@ get_sk:
                        goto out;
                }
                icsk = inet_csk(sk);
-               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
 start_req:
                        st->uid         = sock_i_uid(sk);
@@ -1931,7 +1882,7 @@ start_req:
                        st->sbucket     = 0;
                        goto get_req;
                }
-               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        }
        spin_unlock_bh(&ilb->lock);
        st->offset = 0;
@@ -2150,7 +2101,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
        case TCP_SEQ_STATE_OPENREQ:
                if (v) {
                        struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
-                       read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+                       spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                }
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
@@ -2204,17 +2155,17 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 }
 EXPORT_SYMBOL(tcp_proc_unregister);
 
-static void get_openreq4(const struct sock *sk, const struct request_sock *req,
+static void get_openreq4(const struct request_sock *req,
                         struct seq_file *f, int i, kuid_t uid)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
-       long delta = req->expires - jiffies;
+       long delta = req->rsk_timer.expires - jiffies;
 
        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
                i,
                ireq->ir_loc_addr,
-               ntohs(inet_sk(sk)->inet_sport),
+               ireq->ir_num,
                ireq->ir_rmt_addr,
                ntohs(ireq->ir_rmt_port),
                TCP_SYN_RECV,
@@ -2225,7 +2176,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
                from_kuid_munged(seq_user_ns(f), uid),
                0,  /* non standard timer */
                0, /* open_requests have no inode */
-               atomic_read(&sk->sk_refcnt),
+               0,
                req);
 }
 
@@ -2332,7 +2283,7 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
                        get_tcp4_sock(v, seq, st->num);
                break;
        case TCP_SEQ_STATE_OPENREQ:
-               get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
+               get_openreq4(v, seq, st->num, st->uid);
                break;
        }
 out:
@@ -2460,6 +2411,8 @@ static int __net_init tcp_sk_init(struct net *net)
        }
        net->ipv4.sysctl_tcp_ecn = 2;
        net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
+       net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
+       net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
        return 0;
 
 fail:
index e5f41bd5ec1bcfe88199ec077f1558917b1be61b..78ecc4a017128adf389c33418c937134f111e39d 100644 (file)
@@ -40,6 +40,7 @@ struct tcp_fastopen_metrics {
 
 struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
+       possible_net_t                  tcpm_net;
        struct inetpeer_addr            tcpm_saddr;
        struct inetpeer_addr            tcpm_daddr;
        unsigned long                   tcpm_stamp;
@@ -52,6 +53,11 @@ struct tcp_metrics_block {
        struct rcu_head                 rcu_head;
 };
 
+static inline struct net *tm_net(struct tcp_metrics_block *tm)
+{
+       return read_pnet(&tm->tcpm_net);
+}
+
 static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
 {
@@ -74,23 +80,20 @@ static void tcp_metric_set(struct tcp_metrics_block *tm,
 static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
 {
-       const struct in6_addr *a6, *b6;
-
        if (a->family != b->family)
                return false;
        if (a->family == AF_INET)
                return a->addr.a4 == b->addr.a4;
-
-       a6 = (const struct in6_addr *) &a->addr.a6[0];
-       b6 = (const struct in6_addr *) &b->addr.a6[0];
-
-       return ipv6_addr_equal(a6, b6);
+       return ipv6_addr_equal(&a->addr.in6, &b->addr.in6);
 }
 
 struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu  *chain;
 };
 
+static struct tcpm_hash_bucket *tcp_metrics_hash __read_mostly;
+static unsigned int            tcp_metrics_hash_log __read_mostly;
+
 static DEFINE_SPINLOCK(tcp_metrics_lock);
 
 static void tcpm_suck_dst(struct tcp_metrics_block *tm,
@@ -143,6 +146,9 @@ static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst
 #define TCP_METRICS_RECLAIM_DEPTH      5
 #define TCP_METRICS_RECLAIM_PTR                (struct tcp_metrics_block *) 0x1UL
 
+#define deref_locked(p)        \
+       rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
+
 static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *saddr,
                                          struct inetpeer_addr *daddr,
@@ -171,9 +177,9 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;
 
-               oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
-               for (tm = rcu_dereference(oldest->tcpm_next); tm;
-                    tm = rcu_dereference(tm->tcpm_next)) {
+               oldest = deref_locked(tcp_metrics_hash[hash].chain);
+               for (tm = deref_locked(oldest->tcpm_next); tm;
+                    tm = deref_locked(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
@@ -183,14 +189,15 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                if (!tm)
                        goto out_unlock;
        }
+       write_pnet(&tm->tcpm_net, net);
        tm->tcpm_saddr = *saddr;
        tm->tcpm_daddr = *daddr;
 
        tcpm_suck_dst(tm, dst, true);
 
        if (likely(!reclaim)) {
-               tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
-               rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
+               tm->tcpm_next = tcp_metrics_hash[hash].chain;
+               rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
        }
 
 out_unlock:
@@ -214,10 +221,11 @@ static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *s
        struct tcp_metrics_block *tm;
        int depth = 0;
 
-       for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+       for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, saddr) &&
-                   addr_same(&tm->tcpm_daddr, daddr))
+                   addr_same(&tm->tcpm_daddr, daddr) &&
+                   net_eq(tm_net(tm), net))
                        break;
                depth++;
        }
@@ -242,8 +250,8 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                break;
 #if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               *(struct in6_addr *)saddr.addr.a6 = inet_rsk(req)->ir_v6_loc_addr;
-               *(struct in6_addr *)daddr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
+               saddr.addr.in6 = inet_rsk(req)->ir_v6_loc_addr;
+               daddr.addr.in6 = inet_rsk(req)->ir_v6_rmt_addr;
                hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
                break;
 #endif
@@ -252,12 +260,14 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
        }
 
        net = dev_net(dst->dev);
-       hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+       hash ^= net_hash_mix(net);
+       hash = hash_32(hash, tcp_metrics_hash_log);
 
-       for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+       for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, &saddr) &&
-                   addr_same(&tm->tcpm_daddr, &daddr))
+                   addr_same(&tm->tcpm_daddr, &daddr) &&
+                   net_eq(tm_net(tm), net))
                        break;
        }
        tcpm_check_stamp(tm, dst);
@@ -288,9 +298,9 @@ static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock
                        hash = (__force unsigned int) daddr.addr.a4;
                } else {
                        saddr.family = AF_INET6;
-                       *(struct in6_addr *)saddr.addr.a6 = tw->tw_v6_rcv_saddr;
+                       saddr.addr.in6 = tw->tw_v6_rcv_saddr;
                        daddr.family = AF_INET6;
-                       *(struct in6_addr *)daddr.addr.a6 = tw->tw_v6_daddr;
+                       daddr.addr.in6 = tw->tw_v6_daddr;
                        hash = ipv6_addr_hash(&tw->tw_v6_daddr);
                }
        }
@@ -299,12 +309,14 @@ static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock
                return NULL;
 
        net = twsk_net(tw);
-       hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+       hash ^= net_hash_mix(net);
+       hash = hash_32(hash, tcp_metrics_hash_log);
 
-       for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+       for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, &saddr) &&
-                   addr_same(&tm->tcpm_daddr, &daddr))
+                   addr_same(&tm->tcpm_daddr, &daddr) &&
+                   net_eq(tm_net(tm), net))
                        break;
        }
        return tm;
@@ -336,9 +348,9 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                        hash = (__force unsigned int) daddr.addr.a4;
                } else {
                        saddr.family = AF_INET6;
-                       *(struct in6_addr *)saddr.addr.a6 = sk->sk_v6_rcv_saddr;
+                       saddr.addr.in6 = sk->sk_v6_rcv_saddr;
                        daddr.family = AF_INET6;
-                       *(struct in6_addr *)daddr.addr.a6 = sk->sk_v6_daddr;
+                       daddr.addr.in6 = sk->sk_v6_daddr;
                        hash = ipv6_addr_hash(&sk->sk_v6_daddr);
                }
        }
@@ -347,7 +359,8 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                return NULL;
 
        net = dev_net(dst->dev);
-       hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+       hash ^= net_hash_mix(net);
+       hash = hash_32(hash, tcp_metrics_hash_log);
 
        tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
        if (tm == TCP_METRICS_RECLAIM_PTR)
@@ -492,7 +505,7 @@ void tcp_init_metrics(struct sock *sk)
        struct tcp_metrics_block *tm;
        u32 val, crtt = 0; /* cached RTT scaled by 8 */
 
-       if (dst == NULL)
+       if (!dst)
                goto reset;
 
        dst_confirm(dst);
@@ -773,19 +786,19 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
 
        switch (tm->tcpm_daddr.family) {
        case AF_INET:
-               if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
-                               tm->tcpm_daddr.addr.a4) < 0)
+               if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
+                                   tm->tcpm_daddr.addr.a4) < 0)
                        goto nla_put_failure;
-               if (nla_put_be32(msg, TCP_METRICS_ATTR_SADDR_IPV4,
-                               tm->tcpm_saddr.addr.a4) < 0)
+               if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
+                                   tm->tcpm_saddr.addr.a4) < 0)
                        goto nla_put_failure;
                break;
        case AF_INET6:
-               if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
-                           tm->tcpm_daddr.addr.a6) < 0)
+               if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
+                                    &tm->tcpm_daddr.addr.in6) < 0)
                        goto nla_put_failure;
-               if (nla_put(msg, TCP_METRICS_ATTR_SADDR_IPV6, 16,
-                           tm->tcpm_saddr.addr.a6) < 0)
+               if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
+                                    &tm->tcpm_saddr.addr.in6) < 0)
                        goto nla_put_failure;
                break;
        default:
@@ -898,17 +911,19 @@ static int tcp_metrics_nl_dump(struct sk_buff *skb,
                               struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
-       unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
+       unsigned int max_rows = 1U << tcp_metrics_hash_log;
        unsigned int row, s_row = cb->args[0];
        int s_col = cb->args[1], col = s_col;
 
        for (row = s_row; row < max_rows; row++, s_col = 0) {
                struct tcp_metrics_block *tm;
-               struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;
+               struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;
 
                rcu_read_lock();
                for (col = 0, tm = rcu_dereference(hb->chain); tm;
                     tm = rcu_dereference(tm->tcpm_next), col++) {
+                       if (!net_eq(tm_net(tm), net))
+                               continue;
                        if (col < s_col)
                                continue;
                        if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
@@ -933,7 +948,7 @@ static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
        a = info->attrs[v4];
        if (a) {
                addr->family = AF_INET;
-               addr->addr.a4 = nla_get_be32(a);
+               addr->addr.a4 = nla_get_in_addr(a);
                if (hash)
                        *hash = (__force unsigned int) addr->addr.a4;
                return 0;
@@ -943,9 +958,9 @@ static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                if (nla_len(a) != sizeof(struct in6_addr))
                        return -EINVAL;
                addr->family = AF_INET6;
-               memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
+               addr->addr.in6 = nla_get_in6_addr(a);
                if (hash)
-                       *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
+                       *hash = ipv6_addr_hash(&addr->addr.in6);
                return 0;
        }
        return optional ? 1 : -EAFNOSUPPORT;
@@ -994,13 +1009,15 @@ static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
        if (!reply)
                goto nla_put_failure;
 
-       hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+       hash ^= net_hash_mix(net);
+       hash = hash_32(hash, tcp_metrics_hash_log);
        ret = -ESRCH;
        rcu_read_lock();
-       for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+       for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
-                   (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
+                   (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
+                   net_eq(tm_net(tm), net)) {
                        ret = tcp_metrics_fill_info(msg, tm);
                        break;
                }
@@ -1020,34 +1037,27 @@ out_free:
        return ret;
 }
 
-#define deref_locked_genl(p)   \
-       rcu_dereference_protected(p, lockdep_genl_is_held() && \
-                                    lockdep_is_held(&tcp_metrics_lock))
-
-#define deref_genl(p)  rcu_dereference_protected(p, lockdep_genl_is_held())
-
-static int tcp_metrics_flush_all(struct net *net)
+static void tcp_metrics_flush_all(struct net *net)
 {
-       unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
-       struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
+       unsigned int max_rows = 1U << tcp_metrics_hash_log;
+       struct tcpm_hash_bucket *hb = tcp_metrics_hash;
        struct tcp_metrics_block *tm;
        unsigned int row;
 
        for (row = 0; row < max_rows; row++, hb++) {
+               struct tcp_metrics_block __rcu **pp;
                spin_lock_bh(&tcp_metrics_lock);
-               tm = deref_locked_genl(hb->chain);
-               if (tm)
-                       hb->chain = NULL;
-               spin_unlock_bh(&tcp_metrics_lock);
-               while (tm) {
-                       struct tcp_metrics_block *next;
-
-                       next = deref_genl(tm->tcpm_next);
-                       kfree_rcu(tm, rcu_head);
-                       tm = next;
+               pp = &hb->chain;
+               for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
+                       if (net_eq(tm_net(tm), net)) {
+                               *pp = tm->tcpm_next;
+                               kfree_rcu(tm, rcu_head);
+                       } else {
+                               pp = &tm->tcpm_next;
+                       }
                }
+               spin_unlock_bh(&tcp_metrics_lock);
        }
-       return 0;
 }
 
 static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
@@ -1064,19 +1074,23 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
        ret = parse_nl_addr(info, &daddr, &hash, 1);
        if (ret < 0)
                return ret;
-       if (ret > 0)
-               return tcp_metrics_flush_all(net);
+       if (ret > 0) {
+               tcp_metrics_flush_all(net);
+               return 0;
+       }
        ret = parse_nl_saddr(info, &saddr);
        if (ret < 0)
                src = false;
 
-       hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
-       hb = net->ipv4.tcp_metrics_hash + hash;
+       hash ^= net_hash_mix(net);
+       hash = hash_32(hash, tcp_metrics_hash_log);
+       hb = tcp_metrics_hash + hash;
        pp = &hb->chain;
        spin_lock_bh(&tcp_metrics_lock);
-       for (tm = deref_locked_genl(*pp); tm; tm = deref_locked_genl(*pp)) {
+       for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
-                   (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
+                   (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
+                   net_eq(tm_net(tm), net)) {
                        *pp = tm->tcpm_next;
                        kfree_rcu(tm, rcu_head);
                        found = true;
@@ -1126,6 +1140,9 @@ static int __net_init tcp_net_metrics_init(struct net *net)
        size_t size;
        unsigned int slots;
 
+       if (!net_eq(net, &init_net))
+               return 0;
+
        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
@@ -1134,14 +1151,14 @@ static int __net_init tcp_net_metrics_init(struct net *net)
                        slots = 8 * 1024;
        }
 
-       net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
-       size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;
+       tcp_metrics_hash_log = order_base_2(slots);
+       size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;
 
-       net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
-       if (!net->ipv4.tcp_metrics_hash)
-               net->ipv4.tcp_metrics_hash = vzalloc(size);
+       tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+       if (!tcp_metrics_hash)
+               tcp_metrics_hash = vzalloc(size);
 
-       if (!net->ipv4.tcp_metrics_hash)
+       if (!tcp_metrics_hash)
                return -ENOMEM;
 
        return 0;
@@ -1149,19 +1166,7 @@ static int __net_init tcp_net_metrics_init(struct net *net)
 
 static void __net_exit tcp_net_metrics_exit(struct net *net)
 {
-       unsigned int i;
-
-       for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
-               struct tcp_metrics_block *tm, *next;
-
-               tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
-               while (tm) {
-                       next = rcu_dereference_protected(tm->tcpm_next, 1);
-                       kfree(tm);
-                       tm = next;
-               }
-       }
-       kvfree(net->ipv4.tcp_metrics_hash);
+       tcp_metrics_flush_all(net);
 }
 
 static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
@@ -1175,16 +1180,10 @@ void __init tcp_metrics_init(void)
 
        ret = register_pernet_subsys(&tcp_net_metrics_ops);
        if (ret < 0)
-               goto cleanup;
+               panic("Could not allocate the tcp_metrics hash table\n");
+
        ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
                                            tcp_metrics_nl_ops);
        if (ret < 0)
-               goto cleanup_subsys;
-       return;
-
-cleanup_subsys:
-       unregister_pernet_subsys(&tcp_net_metrics_ops);
-
-cleanup:
-       return;
+               panic("Could not register tcp_metrics generic netlink\n");
 }
index dd11ac7798c626d9abe3fbada06fadc99eafe378..d7003911c894075c209756a0ce26950a6a31aba4 100644 (file)
@@ -294,7 +294,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
        if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
                tw = inet_twsk_alloc(sk, state);
 
-       if (tw != NULL) {
+       if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
                struct inet_sock *inet = inet_sk(sk);
@@ -332,7 +332,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                        struct tcp_md5sig_key *key;
                        tcptw->tw_md5_key = NULL;
                        key = tp->af_specific->md5_lookup(sk, sk);
-                       if (key != NULL) {
+                       if (key) {
                                tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                                if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
                                        BUG();
@@ -454,7 +454,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 {
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
-       if (newsk != NULL) {
+       if (newsk) {
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
@@ -572,7 +572,6 @@ EXPORT_SYMBOL(tcp_create_openreq_child);
 
 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
-                          struct request_sock **prev,
                           bool fastopen)
 {
        struct tcp_options_received tmp_opt;
@@ -630,8 +629,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                                          &tcp_rsk(req)->last_oow_ack_time) &&
 
                    !inet_rtx_syn_ack(sk, req))
-                       req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
-                                          TCP_RTO_MAX) + jiffies;
+                       mod_timer_pending(&req->rsk_timer, jiffies +
+                               min(TCP_TIMEOUT_INIT << req->num_timeout,
+                                   TCP_RTO_MAX));
                return NULL;
        }
 
@@ -763,10 +763,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
         * socket is created, wait for troubles.
         */
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-       if (child == NULL)
+       if (!child)
                goto listen_overflow;
 
-       inet_csk_reqsk_queue_unlink(sk, req, prev);
+       inet_csk_reqsk_queue_unlink(sk, req);
        inet_csk_reqsk_queue_removed(sk, req);
 
        inet_csk_reqsk_queue_add(sk, req, child);
@@ -791,7 +791,7 @@ embryonic_reset:
                tcp_reset(sk);
        }
        if (!fastopen) {
-               inet_csk_reqsk_queue_drop(sk, req, prev);
+               inet_csk_reqsk_queue_drop(sk, req);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
        }
        return NULL;
index 9d7930ba8e0f4c51c8d5434c5ea186b585ff3a35..3f7c2fca5431891f473c9ace152a6b3e04443008 100644 (file)
@@ -29,8 +29,8 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
        }
 }
 
-struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
-                                netdev_features_t features)
+static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
+                                       netdev_features_t features)
 {
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                return ERR_PTR(-EINVAL);
index 1db253e36045ac038d7449a06d312275535a8014..7404e5238e004395ce0a55a073c806076b2394c8 100644 (file)
@@ -565,7 +565,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
        opts->mss = tcp_advertise_mss(sk);
        remaining -= TCPOLEN_MSS_ALIGNED;
 
-       if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
+       if (likely(sysctl_tcp_timestamps && !*md5)) {
                opts->options |= OPTION_TS;
                opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
                opts->tsecr = tp->rx_opt.ts_recent;
@@ -601,15 +601,14 @@ static unsigned int tcp_synack_options(struct sock *sk,
                                   struct request_sock *req,
                                   unsigned int mss, struct sk_buff *skb,
                                   struct tcp_out_options *opts,
-                                  struct tcp_md5sig_key **md5,
+                                  const struct tcp_md5sig_key *md5,
                                   struct tcp_fastopen_cookie *foc)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        unsigned int remaining = MAX_TCP_OPTION_SPACE;
 
 #ifdef CONFIG_TCP_MD5SIG
-       *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
-       if (*md5) {
+       if (md5) {
                opts->options |= OPTION_MD5;
                remaining -= TCPOLEN_MD5SIG_ALIGNED;
 
@@ -620,8 +619,6 @@ static unsigned int tcp_synack_options(struct sock *sk,
                 */
                ireq->tstamp_ok &= !ireq->sack_ok;
        }
-#else
-       *md5 = NULL;
 #endif
 
        /* We always send an MSS option. */
@@ -644,7 +641,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
                if (unlikely(!ireq->tstamp_ok))
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
        }
-       if (foc != NULL && foc->len >= 0) {
+       if (foc && foc->len >= 0) {
                u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
                need = (need + 3) & ~3U;  /* Align to 32 bits */
                if (remaining >= need) {
@@ -989,7 +986,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        if (md5) {
                sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                tp->af_specific->calc_md5_hash(opts.hash_location,
-                                              md5, sk, NULL, skb);
+                                              md5, sk, skb);
        }
 #endif
 
@@ -1151,7 +1148,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 
        /* Get a new skb... force flag on. */
        buff = sk_stream_alloc_skb(sk, nsize, gfp);
-       if (buff == NULL)
+       if (!buff)
                return -ENOMEM; /* We'll just try again later. */
 
        sk->sk_wmem_queued += buff->truesize;
@@ -1354,6 +1351,8 @@ void tcp_mtup_init(struct sock *sk)
                               icsk->icsk_af_ops->net_header_len;
        icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
        icsk->icsk_mtup.probe_size = 0;
+       if (icsk->icsk_mtup.enabled)
+               icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
 }
 EXPORT_SYMBOL(tcp_mtup_init);
 
@@ -1708,7 +1707,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
                return tcp_fragment(sk, skb, len, mss_now, gfp);
 
        buff = sk_stream_alloc_skb(sk, 0, gfp);
-       if (unlikely(buff == NULL))
+       if (unlikely(!buff))
                return -ENOMEM;
 
        sk->sk_wmem_queued += buff->truesize;
@@ -1752,20 +1751,23 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
                                 bool *is_cwnd_limited, u32 max_segs)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
-       u32 send_win, cong_win, limit, in_flight;
+       u32 age, send_win, cong_win, limit, in_flight;
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct skb_mstamp now;
+       struct sk_buff *head;
        int win_divisor;
 
        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                goto send_now;
 
-       if (icsk->icsk_ca_state != TCP_CA_Open)
+       if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_CWR)))
                goto send_now;
 
-       /* Defer for less than two clock ticks. */
-       if (tp->tso_deferred &&
-           (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
+       /* Avoid bursty behavior by allowing defer
+        * only if the last write was recent.
+        */
+       if ((s32)(tcp_time_stamp - tp->lsndtime) > 0)
                goto send_now;
 
        in_flight = tcp_packets_in_flight(tp);
@@ -1807,11 +1809,14 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
                        goto send_now;
        }
 
-       /* Ok, it looks like it is advisable to defer.
-        * Do not rearm the timer if already set to not break TCP ACK clocking.
-        */
-       if (!tp->tso_deferred)
-               tp->tso_deferred = 1 | (jiffies << 1);
+       head = tcp_write_queue_head(sk);
+       skb_mstamp_get(&now);
+       age = skb_mstamp_us_delta(&now, &head->skb_mstamp);
+       /* If next ACK is likely to come too late (half srtt), do not defer */
+       if (age < (tp->srtt_us >> 4))
+               goto send_now;
+
+       /* Ok, it looks like it is advisable to defer. */
 
        if (cong_win < send_win && cong_win < skb->len)
                *is_cwnd_limited = true;
@@ -1819,10 +1824,34 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        return true;
 
 send_now:
-       tp->tso_deferred = 0;
        return false;
 }
 
+static inline void tcp_mtu_check_reprobe(struct sock *sk)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
+       u32 interval;
+       s32 delta;
+
+       interval = net->ipv4.sysctl_tcp_probe_interval;
+       delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp;
+       if (unlikely(delta >= interval * HZ)) {
+               int mss = tcp_current_mss(sk);
+
+               /* Update current search range */
+               icsk->icsk_mtup.probe_size = 0;
+               icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
+                       sizeof(struct tcphdr) +
+                       icsk->icsk_af_ops->net_header_len;
+               icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
+
+               /* Update probe time stamp */
+               icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+       }
+}
+
 /* Create a new MTU probe if we are ready.
  * MTU probe is regularly attempting to increase the path MTU by
  * deliberately sending larger packets.  This discovers routing
@@ -1837,11 +1866,13 @@ static int tcp_mtu_probe(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb, *nskb, *next;
+       struct net *net = sock_net(sk);
        int len;
        int probe_size;
        int size_needed;
        int copy;
        int mss_now;
+       int interval;
 
        /* Not currently probing/verifying,
         * not in recovery,
@@ -1854,12 +1885,25 @@ static int tcp_mtu_probe(struct sock *sk)
            tp->rx_opt.num_sacks || tp->rx_opt.dsack)
                return -1;
 
-       /* Very simple search strategy: just double the MSS. */
+       /* Use binary search for probe_size between tcp_mss_base,
+        * and current mss_clamp. if (search_high - search_low)
+        * smaller than a threshold, backoff from probing.
+        */
        mss_now = tcp_current_mss(sk);
-       probe_size = 2 * tp->mss_cache;
+       probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
+                                   icsk->icsk_mtup.search_low) >> 1);
        size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
-       if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
-               /* TODO: set timer for probe_converge_event */
+       interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
+       /* When misfortune happens, we are reprobing actively,
+        * and then reprobe timer has expired. We stick with current
+        * probing process by not resetting search range to its orignal.
+        */
+       if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
+               interval < net->ipv4.sysctl_tcp_probe_threshold) {
+               /* Check whether enough time has elaplased for
+                * another round of probing.
+                */
+               tcp_mtu_check_reprobe(sk);
                return -1;
        }
 
@@ -1881,7 +1925,8 @@ static int tcp_mtu_probe(struct sock *sk)
        }
 
        /* We're allowed to probe.  Build it now. */
-       if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
+       nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC);
+       if (!nskb)
                return -1;
        sk->sk_wmem_queued += nskb->truesize;
        sk_mem_charge(sk, nskb->truesize);
@@ -2179,7 +2224,7 @@ void tcp_send_loss_probe(struct sock *sk)
        int mss = tcp_current_mss(sk);
        int err = -1;
 
-       if (tcp_send_head(sk) != NULL) {
+       if (tcp_send_head(sk)) {
                err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
                goto rearm_timer;
        }
@@ -2689,7 +2734,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                if (skb == tcp_send_head(sk))
                        break;
                /* we could do better than to assign each time */
-               if (hole == NULL)
+               if (!hole)
                        tp->retransmit_skb_hint = skb;
 
                /* Assume this retransmit will generate
@@ -2713,7 +2758,7 @@ begin_fwd:
                        if (!tcp_can_forward_retransmit(sk))
                                break;
                        /* Backtrack if necessary to non-L'ed skb */
-                       if (hole != NULL) {
+                       if (hole) {
                                skb = hole;
                                hole = NULL;
                        }
@@ -2721,7 +2766,7 @@ begin_fwd:
                        goto begin_fwd;
 
                } else if (!(sacked & TCPCB_LOST)) {
-                       if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
+                       if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
                                hole = skb;
                        continue;
 
@@ -2766,7 +2811,7 @@ void tcp_send_fin(struct sock *sk)
         */
        mss_now = tcp_current_mss(sk);
 
-       if (tcp_send_head(sk) != NULL) {
+       if (tcp_send_head(sk)) {
                TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
                TCP_SKB_CB(skb)->end_seq++;
                tp->write_seq++;
@@ -2824,14 +2869,14 @@ int tcp_send_synack(struct sock *sk)
        struct sk_buff *skb;
 
        skb = tcp_write_queue_head(sk);
-       if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
+       if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
                pr_debug("%s: wrong queue state\n", __func__);
                return -EFAULT;
        }
        if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
                if (skb_cloned(skb)) {
                        struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
-                       if (nskb == NULL)
+                       if (!nskb)
                                return -ENOMEM;
                        tcp_unlink_write_queue(skb, sk);
                        __skb_header_release(nskb);
@@ -2866,7 +2911,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcphdr *th;
        struct sk_buff *skb;
-       struct tcp_md5sig_key *md5;
+       struct tcp_md5sig_key *md5 = NULL;
        int tcp_header_size;
        int mss;
 
@@ -2879,7 +2924,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        skb_reserve(skb, MAX_TCP_HEADER);
 
        skb_dst_set(skb, dst);
-       security_skb_owned_by(skb, sk);
 
        mss = dst_metric_advmss(dst);
        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
@@ -2892,7 +2936,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        else
 #endif
        skb_mstamp_get(&skb->skb_mstamp);
-       tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
+
+#ifdef CONFIG_TCP_MD5SIG
+       rcu_read_lock();
+       md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
+#endif
+       tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
                                             foc) + sizeof(*th);
 
        skb_push(skb, tcp_header_size);
@@ -2923,10 +2972,10 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Okay, we have all we need - do the md5 hash if needed */
-       if (md5) {
+       if (md5)
                tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
-                                              md5, NULL, req, skb);
-       }
+                                              md5, req_to_sk(req), skb);
+       rcu_read_unlock();
 #endif
 
        return skb;
@@ -2966,7 +3015,7 @@ static void tcp_connect_init(struct sock *sk)
                (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
 
 #ifdef CONFIG_TCP_MD5SIG
-       if (tp->af_specific->md5_lookup(sk, sk) != NULL)
+       if (tp->af_specific->md5_lookup(sk, sk))
                tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 
@@ -3252,7 +3301,7 @@ void tcp_send_ack(struct sock *sk)
         * sock.
         */
        buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
-       if (buff == NULL) {
+       if (!buff) {
                inet_csk_schedule_ack(sk);
                inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
@@ -3296,7 +3345,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 
        /* We don't queue it, tcp_transmit_skb() sets ownership. */
        skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
-       if (skb == NULL)
+       if (!skb)
                return -1;
 
        /* Reserve space for headers and set control bits. */
@@ -3327,8 +3376,8 @@ int tcp_write_wakeup(struct sock *sk)
        if (sk->sk_state == TCP_CLOSE)
                return -1;
 
-       if ((skb = tcp_send_head(sk)) != NULL &&
-           before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
+       skb = tcp_send_head(sk);
+       if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
                int err;
                unsigned int mss = tcp_current_mss(sk);
                unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
index 0732b787904ed32003bb776c744ed56457e0cb37..2568fd282873b7436ca2299e20c283e1affd8688 100644 (file)
@@ -107,6 +107,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
        if (net->ipv4.sysctl_tcp_mtu_probing) {
                if (!icsk->icsk_mtup.enabled) {
                        icsk->icsk_mtup.enabled = 1;
+                       icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
                        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
                } else {
                        struct net *net = sock_net(sk);
@@ -326,7 +327,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
        struct request_sock *req;
 
        req = tcp_sk(sk)->fastopen_rsk;
-       req->rsk_ops->syn_ack_timeout(sk, req);
+       req->rsk_ops->syn_ack_timeout(req);
 
        if (req->num_timeout >= max_retries) {
                tcp_write_err(sk);
@@ -538,19 +539,11 @@ static void tcp_write_timer(unsigned long data)
        sock_put(sk);
 }
 
-/*
- *     Timer for listening sockets
- */
-
-static void tcp_synack_timer(struct sock *sk)
+void tcp_syn_ack_timeout(const struct request_sock *req)
 {
-       inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
-                                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
-}
+       struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
 
-void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
-{
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
+       NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
 }
 EXPORT_SYMBOL(tcp_syn_ack_timeout);
 
@@ -582,7 +575,7 @@ static void tcp_keepalive_timer (unsigned long data)
        }
 
        if (sk->sk_state == TCP_LISTEN) {
-               tcp_synack_timer(sk);
+               pr_err("Hmm... keepalive on a LISTEN ???\n");
                goto out;
        }
 
index 97ef1f8b7be81ed7d06c599b4158db0507afde44..2162fc6ce1c1e779e0bbe4c63aa126ab1db08540 100644 (file)
@@ -318,8 +318,8 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
                   inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
 }
 
-static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
-                                      unsigned int port)
+static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
+                             unsigned int port)
 {
        return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
 }
@@ -421,9 +421,9 @@ static inline int compute_score2(struct sock *sk, struct net *net,
        return score;
 }
 
-static unsigned int udp_ehashfn(struct net *net, const __be32 laddr,
-                                const __u16 lport, const __be32 faddr,
-                                const __be16 fport)
+static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
+                      const __u16 lport, const __be32 faddr,
+                      const __be16 fport)
 {
        static u32 udp_ehash_secret __read_mostly;
 
@@ -633,7 +633,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 
        sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
                        iph->saddr, uh->source, skb->dev->ifindex, udptable);
-       if (sk == NULL) {
+       if (!sk) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return; /* No socket for error */
        }
@@ -873,8 +873,7 @@ out:
 }
 EXPORT_SYMBOL(udp_push_pending_frames);
 
-int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-               size_t len)
+int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct udp_sock *up = udp_sk(sk);
@@ -1012,7 +1011,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (connected)
                rt = (struct rtable *)sk_dst_check(sk, 0);
 
-       if (rt == NULL) {
+       if (!rt) {
                struct net *net = sock_net(sk);
 
                fl4 = &fl4_stack;
@@ -1136,7 +1135,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
                 * sendpage interface can't pass.
                 * This will succeed only when the socket is connected.
                 */
-               ret = udp_sendmsg(NULL, sk, &msg, 0);
+               ret = udp_sendmsg(sk, &msg, 0);
                if (ret < 0)
                        return ret;
        }
@@ -1254,8 +1253,8 @@ EXPORT_SYMBOL(udp_ioctl);
  *     return it, otherwise we block.
  */
 
-int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-               size_t len, int noblock, int flags, int *addr_len)
+int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+               int flags, int *addr_len)
 {
        struct inet_sock *inet = inet_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
@@ -1523,7 +1522,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
                /* if we're overly short, let UDP handle it */
                encap_rcv = ACCESS_ONCE(up->encap_rcv);
-               if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
+               if (skb->len > sizeof(struct udphdr) && encap_rcv) {
                        int ret;
 
                        /* Verify checksum before giving to encap */
@@ -1620,7 +1619,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
        for (i = 0; i < count; i++) {
                sk = stack[i];
-               if (likely(skb1 == NULL))
+               if (likely(!skb1))
                        skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 
                if (!skb1) {
@@ -1803,7 +1802,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                                                saddr, daddr, udptable, proto);
 
        sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-       if (sk != NULL) {
+       if (sk) {
                int ret;
 
                if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
@@ -2525,6 +2524,16 @@ void __init udp_table_init(struct udp_table *table, const char *name)
        }
 }
 
+u32 udp_flow_hashrnd(void)
+{
+       static u32 hashrnd __read_mostly;
+
+       net_get_random_once(&hashrnd, sizeof(hashrnd));
+
+       return hashrnd;
+}
+EXPORT_SYMBOL(udp_flow_hashrnd);
+
 void __init udp_init(void)
 {
        unsigned long limit;
index 4a000f1dd75753833b792f6979bf697337f4dd7a..b763c39ae1d734621a5f6872f9d41d442f476c92 100644 (file)
@@ -18,8 +18,9 @@
 #include <linux/sock_diag.h>
 
 static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
-               struct netlink_callback *cb, struct inet_diag_req_v2 *req,
-               struct nlattr *bc)
+                       struct netlink_callback *cb,
+                       const struct inet_diag_req_v2 *req,
+                       struct nlattr *bc)
 {
        if (!inet_diag_bc_sk(bc, sk))
                return 0;
@@ -31,7 +32,8 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
 }
 
 static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
-               const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
+                       const struct nlmsghdr *nlh,
+                       const struct inet_diag_req_v2 *req)
 {
        int err = -EINVAL;
        struct sock *sk;
@@ -56,7 +58,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
                goto out_nosk;
 
        err = -ENOENT;
-       if (sk == NULL)
+       if (!sk)
                goto out_nosk;
 
        err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
@@ -90,8 +92,9 @@ out_nosk:
        return err;
 }
 
-static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb,
-               struct inet_diag_req_v2 *r, struct nlattr *bc)
+static void udp_dump(struct udp_table *table, struct sk_buff *skb,
+                    struct netlink_callback *cb,
+                    const struct inet_diag_req_v2 *r, struct nlattr *bc)
 {
        int num, s_num, slot, s_slot;
        struct net *net = sock_net(skb->sk);
@@ -144,13 +147,13 @@ done:
 }
 
 static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
-               struct inet_diag_req_v2 *r, struct nlattr *bc)
+                         const struct inet_diag_req_v2 *r, struct nlattr *bc)
 {
        udp_dump(&udp_table, skb, cb, r, bc);
 }
 
 static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
-               struct inet_diag_req_v2 *req)
+                            const struct inet_diag_req_v2 *req)
 {
        return udp_dump_one(&udp_table, in_skb, nlh, req);
 }
@@ -170,13 +173,14 @@ static const struct inet_diag_handler udp_diag_handler = {
 };
 
 static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
-               struct inet_diag_req_v2 *r, struct nlattr *bc)
+                             const struct inet_diag_req_v2 *r,
+                             struct nlattr *bc)
 {
        udp_dump(&udplite_table, skb, cb, r, bc);
 }
 
 static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
-               struct inet_diag_req_v2 *req)
+                                const struct inet_diag_req_v2 *req)
 {
        return udp_dump_one(&udplite_table, in_skb, nlh, req);
 }
index f3c27899f62b914f7b406094472a47e2fe2df613..7e0fe4bdd96702256b7608ad3fe45df114573702 100644 (file)
@@ -21,8 +21,8 @@ int compat_udp_setsockopt(struct sock *sk, int level, int optname,
 int compat_udp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen);
 #endif
-int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-               size_t len, int noblock, int flags, int *addr_len);
+int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+               int flags, int *addr_len);
 int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
                 int flags);
 int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
index 4915d8284a86f9ec6e5536804b24398d205e2a2b..f9386160cbee0288e294ea2cd8ba3b5be65cdbf6 100644 (file)
@@ -285,7 +285,7 @@ void udp_del_offload(struct udp_offload *uo)
        pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
 unlock:
        spin_unlock(&udp_offload_lock);
-       if (uo_priv != NULL)
+       if (uo_priv)
                call_rcu(&uo_priv->rcu, udp_offload_free_routine);
 }
 EXPORT_SYMBOL(udp_del_offload);
@@ -394,7 +394,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
                        break;
        }
 
-       if (uo_priv != NULL) {
+       if (uo_priv) {
                NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
                err = uo_priv->offload->callbacks.gro_complete(skb,
                                nhoff + sizeof(struct udphdr),
index aac6197b7a7132f31af9a80d960d94d4a9f92290..cac7468db0a1bfc74f9f302588c1e9875e84ada0 100644 (file)
@@ -24,7 +24,7 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
 
 static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
 {
-       if (skb_dst(skb) == NULL) {
+       if (!skb_dst(skb)) {
                const struct iphdr *iph = ip_hdr(skb);
 
                if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
index 91771a7c802f828c33429413674c5f78ca393b6f..35feda67646494c92263cf30109432fb395fa1df 100644 (file)
@@ -63,7 +63,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
        top_iph->saddr = x->props.saddr.a4;
        top_iph->daddr = x->id.daddr.a4;
-       ip_select_ident(skb, NULL);
+       ip_select_ident(dev_net(dst->dev), skb, NULL);
 
        return 0;
 }
index 6156f68a1e90b53f7504a1e6f729b60c29d52b3a..bff69746e05f05d936ec8f7a62c34d3f87a55d10 100644 (file)
@@ -232,7 +232,6 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 
 static struct dst_ops xfrm4_dst_ops = {
        .family =               AF_INET,
-       .protocol =             cpu_to_be16(ETH_P_IP),
        .gc =                   xfrm4_garbage_collect,
        .update_pmtu =          xfrm4_update_pmtu,
        .redirect =             xfrm4_redirect,
@@ -299,7 +298,7 @@ static void __net_exit xfrm4_net_exit(struct net *net)
 {
        struct ctl_table *table;
 
-       if (net->ipv4.xfrm4_hdr == NULL)
+       if (!net->ipv4.xfrm4_hdr)
                return;
 
        table = net->ipv4.xfrm4_hdr->ctl_table_arg;
index b6030025f41197efbcdfd1d8c013e469413550b5..37b70e82bff8ee9b9964a0237df9d66f3b78bcc7 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/socket.h>
 #include <linux/sockios.h>
 #include <linux/net.h>
+#include <linux/inet.h>
 #include <linux/in6.h>
 #include <linux/netdevice.h>
 #include <linux/if_addr.h>
 
 #define        INFINITY_LIFE_TIME      0xFFFFFFFF
 
+#define IPV6_MAX_STRLEN \
+       sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
+
 static inline u32 cstamp_delta(unsigned long cstamp)
 {
        return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
@@ -127,6 +131,9 @@ static void ipv6_regen_rndid(unsigned long data);
 
 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
 static int ipv6_count_addresses(struct inet6_dev *idev);
+static int ipv6_generate_stable_address(struct in6_addr *addr,
+                                       u8 dad_count,
+                                       const struct inet6_dev *idev);
 
 /*
  *     Configured unicast address hash table
@@ -202,6 +209,9 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .accept_dad             = 1,
        .suppress_frag_ndisc    = 1,
        .accept_ra_mtu          = 1,
+       .stable_secret          = {
+               .initialized = false,
+       }
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -240,6 +250,9 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .accept_dad             = 1,
        .suppress_frag_ndisc    = 1,
        .accept_ra_mtu          = 1,
+       .stable_secret          = {
+               .initialized = false,
+       },
 };
 
 /* Check if a valid qdisc is available */
@@ -321,7 +334,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
                return ERR_PTR(-EINVAL);
 
        ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
-       if (ndev == NULL)
+       if (!ndev)
                return ERR_PTR(err);
 
        rwlock_init(&ndev->lock);
@@ -333,7 +346,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
        ndev->cnf.mtu6 = dev->mtu;
        ndev->cnf.sysctl = NULL;
        ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
-       if (ndev->nd_parms == NULL) {
+       if (!ndev->nd_parms) {
                kfree(ndev);
                return ERR_PTR(err);
        }
@@ -468,7 +481,7 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
                        flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        ncm = nlmsg_data(nlh);
@@ -506,7 +519,7 @@ void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
        int err = -ENOBUFS;
 
        skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
@@ -561,10 +574,10 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
                break;
        default:
                dev = __dev_get_by_index(net, ifindex);
-               if (dev == NULL)
+               if (!dev)
                        goto errout;
                in6_dev = __in6_dev_get(dev);
-               if (in6_dev == NULL)
+               if (!in6_dev)
                        goto errout;
                devconf = &in6_dev->cnf;
                break;
@@ -572,7 +585,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
 
        err = -ENOBUFS;
        skb = nlmsg_new(inet6_netconf_msgsize_devconf(-1), GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
@@ -841,7 +854,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
 
        ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
 
-       if (ifa == NULL) {
+       if (!ifa) {
                ADBG("ipv6_add_addr: malloc failed\n");
                err = -ENOBUFS;
                goto out;
@@ -860,7 +873,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
                ifa->peer_addr = *peer_addr;
 
        spin_lock_init(&ifa->lock);
-       spin_lock_init(&ifa->state_lock);
        INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
        INIT_HLIST_NODE(&ifa->addr_lst);
        ifa->scope = scope;
@@ -1003,10 +1015,10 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 
        ASSERT_RTNL();
 
-       spin_lock_bh(&ifp->state_lock);
+       spin_lock_bh(&ifp->lock);
        state = ifp->state;
        ifp->state = INET6_IFADDR_STATE_DEAD;
-       spin_unlock_bh(&ifp->state_lock);
+       spin_unlock_bh(&ifp->lock);
 
        if (state == INET6_IFADDR_STATE_DEAD)
                goto out;
@@ -1546,7 +1558,7 @@ int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
                            : ifp->flags;
                if (ipv6_addr_equal(&ifp->addr, addr) &&
                    !(ifp_flags&banned_flags) &&
-                   (dev == NULL || ifp->idev->dev == dev ||
+                   (!dev || ifp->idev->dev == dev ||
                     !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
                        rcu_read_unlock_bh();
                        return 1;
@@ -1568,7 +1580,7 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
                if (!net_eq(dev_net(ifp->idev->dev), net))
                        continue;
                if (ipv6_addr_equal(&ifp->addr, addr)) {
-                       if (dev == NULL || ifp->idev->dev == dev)
+                       if (!dev || ifp->idev->dev == dev)
                                return true;
                }
        }
@@ -1637,7 +1649,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
                if (!net_eq(dev_net(ifp->idev->dev), net))
                        continue;
                if (ipv6_addr_equal(&ifp->addr, addr)) {
-                       if (dev == NULL || ifp->idev->dev == dev ||
+                       if (!dev || ifp->idev->dev == dev ||
                            !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
                                result = ifp;
                                in6_ifa_hold(ifp);
@@ -1686,19 +1698,21 @@ static int addrconf_dad_end(struct inet6_ifaddr *ifp)
 {
        int err = -ENOENT;
 
-       spin_lock_bh(&ifp->state_lock);
+       spin_lock_bh(&ifp->lock);
        if (ifp->state == INET6_IFADDR_STATE_DAD) {
                ifp->state = INET6_IFADDR_STATE_POSTDAD;
                err = 0;
        }
-       spin_unlock_bh(&ifp->state_lock);
+       spin_unlock_bh(&ifp->lock);
 
        return err;
 }
 
 void addrconf_dad_failure(struct inet6_ifaddr *ifp)
 {
+       struct in6_addr addr;
        struct inet6_dev *idev = ifp->idev;
+       struct net *net = dev_net(ifp->idev->dev);
 
        if (addrconf_dad_end(ifp)) {
                in6_ifa_put(ifp);
@@ -1708,9 +1722,57 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
        net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n",
                             ifp->idev->dev->name, &ifp->addr);
 
-       if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
-               struct in6_addr addr;
+       spin_lock_bh(&ifp->lock);
+
+       if (ifp->flags & IFA_F_STABLE_PRIVACY) {
+               int scope = ifp->scope;
+               u32 flags = ifp->flags;
+               struct in6_addr new_addr;
+               struct inet6_ifaddr *ifp2;
+               u32 valid_lft, preferred_lft;
+               int pfxlen = ifp->prefix_len;
+               int retries = ifp->stable_privacy_retry + 1;
+
+               if (retries > net->ipv6.sysctl.idgen_retries) {
+                       net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
+                                            ifp->idev->dev->name);
+                       goto errdad;
+               }
+
+               new_addr = ifp->addr;
+               if (ipv6_generate_stable_address(&new_addr, retries,
+                                                idev))
+                       goto errdad;
+
+               valid_lft = ifp->valid_lft;
+               preferred_lft = ifp->prefered_lft;
+
+               spin_unlock_bh(&ifp->lock);
+
+               if (idev->cnf.max_addresses &&
+                   ipv6_count_addresses(idev) >=
+                   idev->cnf.max_addresses)
+                       goto lock_errdad;
+
+               net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
+                                    ifp->idev->dev->name);
+
+               ifp2 = ipv6_add_addr(idev, &new_addr, NULL, pfxlen,
+                                    scope, flags, valid_lft,
+                                    preferred_lft);
+               if (IS_ERR(ifp2))
+                       goto lock_errdad;
+
+               spin_lock_bh(&ifp2->lock);
+               ifp2->stable_privacy_retry = retries;
+               ifp2->state = INET6_IFADDR_STATE_PREDAD;
+               spin_unlock_bh(&ifp2->lock);
 
+               addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
+               in6_ifa_put(ifp2);
+lock_errdad:
+               spin_lock_bh(&ifp->lock);
+       } else if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
                addr.s6_addr32[0] = htonl(0xfe800000);
                addr.s6_addr32[1] = 0;
 
@@ -1724,10 +1786,10 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
                }
        }
 
-       spin_lock_bh(&ifp->state_lock);
+errdad:
        /* transition from _POSTDAD to _ERRDAD */
        ifp->state = INET6_IFADDR_STATE_ERRDAD;
-       spin_unlock_bh(&ifp->state_lock);
+       spin_unlock_bh(&ifp->lock);
 
        addrconf_mod_dad_work(ifp, 0);
 }
@@ -2052,7 +2114,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
        struct fib6_table *table;
 
        table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
-       if (table == NULL)
+       if (!table)
                return NULL;
 
        read_lock_bh(&table->tb6_lock);
@@ -2186,6 +2248,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
        __u32 valid_lft;
        __u32 prefered_lft;
        int addr_type;
+       u32 addr_flags = 0;
        struct inet6_dev *in6_dev;
        struct net *net = dev_net(dev);
 
@@ -2215,7 +2278,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
 
        in6_dev = in6_dev_get(dev);
 
-       if (in6_dev == NULL) {
+       if (!in6_dev) {
                net_dbg_ratelimited("addrconf: device %s not configured\n",
                                    dev->name);
                return;
@@ -2292,6 +2355,12 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
                                       in6_dev->token.s6_addr + 8, 8);
                                read_unlock_bh(&in6_dev->lock);
                                tokenized = true;
+                       } else if (in6_dev->addr_gen_mode ==
+                                  IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
+                                  !ipv6_generate_stable_address(&addr, 0,
+                                                                in6_dev)) {
+                               addr_flags |= IFA_F_STABLE_PRIVACY;
+                               goto ok;
                        } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
                                   ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
                                in6_dev_put(in6_dev);
@@ -2308,9 +2377,8 @@ ok:
 
                ifp = ipv6_get_ifaddr(net, &addr, dev, 1);
 
-               if (ifp == NULL && valid_lft) {
+               if (!ifp && valid_lft) {
                        int max_addresses = in6_dev->cnf.max_addresses;
-                       u32 addr_flags = 0;
 
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
                        if (in6_dev->cnf.optimistic_dad &&
@@ -2350,7 +2418,7 @@ ok:
                        u32 stored_lft;
 
                        /* update lifetime (RFC2462 5.5.3 e) */
-                       spin_lock(&ifp->lock);
+                       spin_lock_bh(&ifp->lock);
                        now = jiffies;
                        if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
                                stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
@@ -2380,12 +2448,12 @@ ok:
                                ifp->tstamp = now;
                                flags = ifp->flags;
                                ifp->flags &= ~IFA_F_DEPRECATED;
-                               spin_unlock(&ifp->lock);
+                               spin_unlock_bh(&ifp->lock);
 
                                if (!(flags&IFA_F_TENTATIVE))
                                        ipv6_ifa_notify(0, ifp);
                        } else
-                               spin_unlock(&ifp->lock);
+                               spin_unlock_bh(&ifp->lock);
 
                        manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
                                         create, now);
@@ -2418,7 +2486,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
        dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
 
        err = -ENODEV;
-       if (dev == NULL)
+       if (!dev)
                goto err_exit;
 
 #if IS_ENABLED(CONFIG_IPV6_SIT)
@@ -2464,6 +2532,23 @@ err_exit:
        return err;
 }
 
+static int ipv6_mc_config(struct sock *sk, bool join,
+                         const struct in6_addr *addr, int ifindex)
+{
+       int ret;
+
+       ASSERT_RTNL();
+
+       lock_sock(sk);
+       if (join)
+               ret = ipv6_sock_mc_join(sk, ifindex, addr);
+       else
+               ret = ipv6_sock_mc_drop(sk, ifindex, addr);
+       release_sock(sk);
+
+       return ret;
+}
+
 /*
  *     Manual configuration of address on an interface
  */
@@ -2476,10 +2561,10 @@ static int inet6_addr_add(struct net *net, int ifindex,
        struct inet6_ifaddr *ifp;
        struct inet6_dev *idev;
        struct net_device *dev;
+       unsigned long timeout;
+       clock_t expires;
        int scope;
        u32 flags;
-       clock_t expires;
-       unsigned long timeout;
 
        ASSERT_RTNL();
 
@@ -2501,6 +2586,14 @@ static int inet6_addr_add(struct net *net, int ifindex,
        if (IS_ERR(idev))
                return PTR_ERR(idev);
 
+       if (ifa_flags & IFA_F_MCAUTOJOIN) {
+               int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
+                                        true, pfx, ifindex);
+
+               if (ret < 0)
+                       return ret;
+       }
+
        scope = ipv6_addr_scope(pfx);
 
        timeout = addrconf_timeout_fixup(valid_lft, HZ);
@@ -2542,6 +2635,9 @@ static int inet6_addr_add(struct net *net, int ifindex,
                in6_ifa_put(ifp);
                addrconf_verify_rtnl();
                return 0;
+       } else if (ifa_flags & IFA_F_MCAUTOJOIN) {
+               ipv6_mc_config(net->ipv6.mc_autojoin_sk,
+                              false, pfx, ifindex);
        }
 
        return PTR_ERR(ifp);
@@ -2562,7 +2658,7 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
                return -ENODEV;
 
        idev = __in6_dev_get(dev);
-       if (idev == NULL)
+       if (!idev)
                return -ENXIO;
 
        read_lock_bh(&idev->lock);
@@ -2578,6 +2674,10 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
                                                 jiffies);
                        ipv6_del_addr(ifp);
                        addrconf_verify_rtnl();
+                       if (ipv6_addr_is_multicast(pfx)) {
+                               ipv6_mc_config(net->ipv6.mc_autojoin_sk,
+                                              false, pfx, dev->ifindex);
+                       }
                        return 0;
                }
        }
@@ -2710,7 +2810,7 @@ static void init_loopback(struct net_device *dev)
        ASSERT_RTNL();
 
        idev = ipv6_find_idev(dev);
-       if (idev == NULL) {
+       if (!idev) {
                pr_debug("%s: add_dev failed\n", __func__);
                return;
        }
@@ -2757,10 +2857,11 @@ static void init_loopback(struct net_device *dev)
        }
 }
 
-static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
+static void addrconf_add_linklocal(struct inet6_dev *idev,
+                                  const struct in6_addr *addr, u32 flags)
 {
        struct inet6_ifaddr *ifp;
-       u32 addr_flags = IFA_F_PERMANENT;
+       u32 addr_flags = flags | IFA_F_PERMANENT;
 
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        if (idev->cnf.optimistic_dad &&
@@ -2768,7 +2869,6 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
                addr_flags |= IFA_F_OPTIMISTIC;
 #endif
 
-
        ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
                            INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
        if (!IS_ERR(ifp)) {
@@ -2778,18 +2878,103 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
        }
 }
 
+static bool ipv6_reserved_interfaceid(struct in6_addr address)
+{
+       if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
+               return true;
+
+       if (address.s6_addr32[2] == htonl(0x02005eff) &&
+           ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
+               return true;
+
+       if (address.s6_addr32[2] == htonl(0xfdffffff) &&
+           ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
+               return true;
+
+       return false;
+}
+
+static int ipv6_generate_stable_address(struct in6_addr *address,
+                                       u8 dad_count,
+                                       const struct inet6_dev *idev)
+{
+       static DEFINE_SPINLOCK(lock);
+       static __u32 digest[SHA_DIGEST_WORDS];
+       static __u32 workspace[SHA_WORKSPACE_WORDS];
+
+       static union {
+               char __data[SHA_MESSAGE_BYTES];
+               struct {
+                       struct in6_addr secret;
+                       __be32 prefix[2];
+                       unsigned char hwaddr[MAX_ADDR_LEN];
+                       u8 dad_count;
+               } __packed;
+       } data;
+
+       struct in6_addr secret;
+       struct in6_addr temp;
+       struct net *net = dev_net(idev->dev);
+
+       BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
+
+       if (idev->cnf.stable_secret.initialized)
+               secret = idev->cnf.stable_secret.secret;
+       else if (net->ipv6.devconf_dflt->stable_secret.initialized)
+               secret = net->ipv6.devconf_dflt->stable_secret.secret;
+       else
+               return -1;
+
+retry:
+       spin_lock_bh(&lock);
+
+       sha_init(digest);
+       memset(&data, 0, sizeof(data));
+       memset(workspace, 0, sizeof(workspace));
+       memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
+       data.prefix[0] = address->s6_addr32[0];
+       data.prefix[1] = address->s6_addr32[1];
+       data.secret = secret;
+       data.dad_count = dad_count;
+
+       sha_transform(digest, data.__data, workspace);
+
+       temp = *address;
+       temp.s6_addr32[2] = (__force __be32)digest[0];
+       temp.s6_addr32[3] = (__force __be32)digest[1];
+
+       spin_unlock_bh(&lock);
+
+       if (ipv6_reserved_interfaceid(temp)) {
+               dad_count++;
+               if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
+                       return -1;
+               goto retry;
+       }
+
+       *address = temp;
+       return 0;
+}
+
 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
 {
-       if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) {
-               struct in6_addr addr;
+       struct in6_addr addr;
+
+       ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
 
-               ipv6_addr_set(&addr,  htonl(0xFE800000), 0, 0, 0);
+       if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY) {
+               if (!ipv6_generate_stable_address(&addr, 0, idev))
+                       addrconf_add_linklocal(idev, &addr,
+                                              IFA_F_STABLE_PRIVACY);
+               else if (prefix_route)
+                       addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
+       } else if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) {
                /* addrconf_add_linklocal also adds a prefix_route and we
                 * only need to care about prefix routes if ipv6_generate_eui64
                 * couldn't generate one.
                 */
                if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
-                       addrconf_add_linklocal(idev, &addr);
+                       addrconf_add_linklocal(idev, &addr, 0);
                else if (prefix_route)
                        addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
        }
@@ -2834,7 +3019,7 @@ static void addrconf_sit_config(struct net_device *dev)
         */
 
        idev = ipv6_find_idev(dev);
-       if (idev == NULL) {
+       if (!idev) {
                pr_debug("%s: add_dev failed\n", __func__);
                return;
        }
@@ -2859,7 +3044,7 @@ static void addrconf_gre_config(struct net_device *dev)
        ASSERT_RTNL();
 
        idev = ipv6_find_idev(dev);
-       if (idev == NULL) {
+       if (!idev) {
                pr_debug("%s: add_dev failed\n", __func__);
                return;
        }
@@ -3056,7 +3241,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
        neigh_ifdown(&nd_tbl, dev);
 
        idev = __in6_dev_get(dev);
-       if (idev == NULL)
+       if (!idev)
                return -ENODEV;
 
        /*
@@ -3127,10 +3312,10 @@ restart:
 
                write_unlock_bh(&idev->lock);
 
-               spin_lock_bh(&ifa->state_lock);
+               spin_lock_bh(&ifa->lock);
                state = ifa->state;
                ifa->state = INET6_IFADDR_STATE_DEAD;
-               spin_unlock_bh(&ifa->state_lock);
+               spin_unlock_bh(&ifa->lock);
 
                if (state != INET6_IFADDR_STATE_DEAD) {
                        __ipv6_ifa_notify(RTM_DELADDR, ifa);
@@ -3288,12 +3473,12 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp)
 {
        bool begin_dad = false;
 
-       spin_lock_bh(&ifp->state_lock);
+       spin_lock_bh(&ifp->lock);
        if (ifp->state != INET6_IFADDR_STATE_DEAD) {
                ifp->state = INET6_IFADDR_STATE_PREDAD;
                begin_dad = true;
        }
-       spin_unlock_bh(&ifp->state_lock);
+       spin_unlock_bh(&ifp->lock);
 
        if (begin_dad)
                addrconf_mod_dad_work(ifp, 0);
@@ -3315,7 +3500,7 @@ static void addrconf_dad_work(struct work_struct *w)
 
        rtnl_lock();
 
-       spin_lock_bh(&ifp->state_lock);
+       spin_lock_bh(&ifp->lock);
        if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
                action = DAD_BEGIN;
                ifp->state = INET6_IFADDR_STATE_DAD;
@@ -3323,7 +3508,7 @@ static void addrconf_dad_work(struct work_struct *w)
                action = DAD_ABORT;
                ifp->state = INET6_IFADDR_STATE_POSTDAD;
        }
-       spin_unlock_bh(&ifp->state_lock);
+       spin_unlock_bh(&ifp->lock);
 
        if (action == DAD_BEGIN) {
                addrconf_dad_begin(ifp);
@@ -3811,7 +3996,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        ifm = nlmsg_data(nlh);
        pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
-       if (pfx == NULL)
+       if (!pfx)
                return -EINVAL;
 
        ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
@@ -3923,7 +4108,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        ifm = nlmsg_data(nlh);
        pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
-       if (pfx == NULL)
+       if (!pfx)
                return -EINVAL;
 
        if (tb[IFA_CACHEINFO]) {
@@ -3938,17 +4123,17 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
        }
 
        dev =  __dev_get_by_index(net, ifm->ifa_index);
-       if (dev == NULL)
+       if (!dev)
                return -ENODEV;
 
        ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
 
        /* We ignore other flags so far. */
        ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
-                    IFA_F_NOPREFIXROUTE;
+                    IFA_F_NOPREFIXROUTE | IFA_F_MCAUTOJOIN;
 
        ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
-       if (ifa == NULL) {
+       if (!ifa) {
                /*
                 * It would be best to check for !NLM_F_CREATE here but
                 * userspace already relies on not having to provide this.
@@ -4023,7 +4208,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
        u32 preferred, valid;
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
@@ -4052,11 +4237,11 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
        }
 
        if (!ipv6_addr_any(&ifa->peer_addr)) {
-               if (nla_put(skb, IFA_LOCAL, 16, &ifa->addr) < 0 ||
-                   nla_put(skb, IFA_ADDRESS, 16, &ifa->peer_addr) < 0)
+               if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
+                   nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
                        goto error;
        } else
-               if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0)
+               if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
                        goto error;
 
        if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
@@ -4084,11 +4269,11 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
                scope = RT_SCOPE_SITE;
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
-       if (nla_put(skb, IFA_MULTICAST, 16, &ifmca->mca_addr) < 0 ||
+       if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
            put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
                          INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
                nlmsg_cancel(skb, nlh);
@@ -4110,11 +4295,11 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
                scope = RT_SCOPE_SITE;
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
-       if (nla_put(skb, IFA_ANYCAST, 16, &ifaca->aca_addr) < 0 ||
+       if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
            put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
                          INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
                nlmsg_cancel(skb, nlh);
@@ -4283,7 +4468,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh)
                goto errout;
 
        addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
-       if (addr == NULL) {
+       if (!addr) {
                err = -EINVAL;
                goto errout;
        }
@@ -4326,7 +4511,7 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
        int err = -ENOBUFS;
 
        skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
@@ -4398,6 +4583,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
        array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
        array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
+       /* we omit DEVCONF_STABLE_SECRET for now */
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -4478,24 +4664,24 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
        if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
                goto nla_put_failure;
        nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
-       if (nla == NULL)
+       if (!nla)
                goto nla_put_failure;
        ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
 
        /* XXX - MC not implemented */
 
        nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
-       if (nla == NULL)
+       if (!nla)
                goto nla_put_failure;
        snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
 
        nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
-       if (nla == NULL)
+       if (!nla)
                goto nla_put_failure;
        snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
 
        nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
-       if (nla == NULL)
+       if (!nla)
                goto nla_put_failure;
 
        if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->addr_gen_mode))
@@ -4541,7 +4727,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
 
        ASSERT_RTNL();
 
-       if (token == NULL)
+       if (!token)
                return -EINVAL;
        if (ipv6_addr_any(token))
                return -EINVAL;
@@ -4632,8 +4818,15 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
                u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
 
                if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
-                   mode != IN6_ADDR_GEN_MODE_NONE)
+                   mode != IN6_ADDR_GEN_MODE_NONE &&
+                   mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY)
                        return -EINVAL;
+
+               if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
+                   !idev->cnf.stable_secret.initialized &&
+                   !dev_net(dev)->ipv6.devconf_dflt->stable_secret.initialized)
+                       return -EINVAL;
+
                idev->addr_gen_mode = mode;
                err = 0;
        }
@@ -4650,7 +4843,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
        void *protoinfo;
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        hdr = nlmsg_data(nlh);
@@ -4665,11 +4858,11 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
            (dev->addr_len &&
             nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
            nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
-           (dev->ifindex != dev->iflink &&
-            nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+           (dev->ifindex != dev_get_iflink(dev) &&
+            nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
                goto nla_put_failure;
        protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
-       if (protoinfo == NULL)
+       if (!protoinfo)
                goto nla_put_failure;
 
        if (inet6_fill_ifla6_attrs(skb, idev) < 0)
@@ -4730,7 +4923,7 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
        int err = -ENOBUFS;
 
        skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
@@ -4763,7 +4956,7 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
        struct prefix_cacheinfo ci;
 
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        pmsg = nlmsg_data(nlh);
@@ -4802,7 +4995,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
        int err = -ENOBUFS;
 
        skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
@@ -5042,6 +5235,74 @@ int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
        return ret;
 }
 
+static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
+                                        void __user *buffer, size_t *lenp,
+                                        loff_t *ppos)
+{
+       int err;
+       struct in6_addr addr;
+       char str[IPV6_MAX_STRLEN];
+       struct ctl_table lctl = *ctl;
+       struct net *net = ctl->extra2;
+       struct ipv6_stable_secret *secret = ctl->data;
+
+       if (&net->ipv6.devconf_all->stable_secret == ctl->data)
+               return -EIO;
+
+       lctl.maxlen = IPV6_MAX_STRLEN;
+       lctl.data = str;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       if (!write && !secret->initialized) {
+               err = -EIO;
+               goto out;
+       }
+
+       if (!write) {
+               err = snprintf(str, sizeof(str), "%pI6",
+                              &secret->secret);
+               if (err >= sizeof(str)) {
+                       err = -EIO;
+                       goto out;
+               }
+       }
+
+       err = proc_dostring(&lctl, write, buffer, lenp, ppos);
+       if (err || !write)
+               goto out;
+
+       if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
+               err = -EIO;
+               goto out;
+       }
+
+       secret->initialized = true;
+       secret->secret = addr;
+
+       if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
+               struct net_device *dev;
+
+               for_each_netdev(net, dev) {
+                       struct inet6_dev *idev = __in6_dev_get(dev);
+
+                       if (idev) {
+                               idev->addr_gen_mode =
+                                       IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+                       }
+               }
+       } else {
+               struct inet6_dev *idev = ctl->extra1;
+
+               idev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+       }
+
+out:
+       rtnl_unlock();
+
+       return err;
+}
 
 static struct addrconf_sysctl_table
 {
@@ -5314,6 +5575,13 @@ static struct addrconf_sysctl_table
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
+               {
+                       .procname       = "stable_secret",
+                       .data           = &ipv6_devconf.stable_secret,
+                       .maxlen         = IPV6_MAX_STRLEN,
+                       .mode           = 0600,
+                       .proc_handler   = addrconf_sysctl_stable_secret,
+               },
                {
                        /* sentinel */
                }
@@ -5328,7 +5596,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
        char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
 
        t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL);
-       if (t == NULL)
+       if (!t)
                goto out;
 
        for (i = 0; t->addrconf_vars[i].data; i++) {
@@ -5340,7 +5608,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
        snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
 
        t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars);
-       if (t->sysctl_header == NULL)
+       if (!t->sysctl_header)
                goto free;
 
        p->sysctl = t;
@@ -5356,7 +5624,7 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
 {
        struct addrconf_sysctl_table *t;
 
-       if (p->sysctl == NULL)
+       if (!p->sysctl)
                return;
 
        t = p->sysctl;
@@ -5399,17 +5667,20 @@ static int __net_init addrconf_init_net(struct net *net)
        struct ipv6_devconf *all, *dflt;
 
        all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
-       if (all == NULL)
+       if (!all)
                goto err_alloc_all;
 
        dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
-       if (dflt == NULL)
+       if (!dflt)
                goto err_alloc_dflt;
 
        /* these will be inherited by all namespaces */
        dflt->autoconf = ipv6_defaults.autoconf;
        dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
 
+       dflt->stable_secret.initialized = false;
+       all->stable_secret.initialized = false;
+
        net->ipv6.devconf_all = all;
        net->ipv6.devconf_dflt = dflt;
 
index 98cc4cd570e23b96dc479956afe7bb6660a8a6c6..d873ceea86e6c74c34e7fcd31bec41c78ce5720b 100644 (file)
@@ -140,7 +140,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
        struct net_device *dev = idev->dev;
 
        WARN_ON(!list_empty(&idev->addr_list));
-       WARN_ON(idev->mc_list != NULL);
+       WARN_ON(idev->mc_list);
        WARN_ON(timer_pending(&idev->rs_timer));
 
 #ifdef NET_REFCNT_DEBUG
index e43e79d0a6124caa06cd2e2c09203ad2cd032c9e..882124ebb438bd033765c4284ddf7ada9834d39d 100644 (file)
@@ -29,9 +29,7 @@
  * Policy Table
  */
 struct ip6addrlbl_entry {
-#ifdef CONFIG_NET_NS
-       struct net *lbl_net;
-#endif
+       possible_net_t lbl_net;
        struct in6_addr prefix;
        int prefixlen;
        int ifindex;
@@ -129,9 +127,6 @@ static const __net_initconst struct ip6addrlbl_init_table
 /* Object management */
 static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p)
 {
-#ifdef CONFIG_NET_NS
-       release_net(p->lbl_net);
-#endif
        kfree(p);
 }
 
@@ -240,9 +235,7 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
        newp->addrtype = addrtype;
        newp->label = label;
        INIT_HLIST_NODE(&newp->list);
-#ifdef CONFIG_NET_NS
-       newp->lbl_net = hold_net(net);
-#endif
+       write_pnet(&newp->lbl_net, net);
        atomic_set(&newp->refcnt, 1);
        return newp;
 }
@@ -484,7 +477,7 @@ static int ip6addrlbl_fill(struct sk_buff *skb,
 
        ip6addrlbl_putmsg(nlh, p->prefixlen, p->ifindex, lseq);
 
-       if (nla_put(skb, IFAL_ADDRESS, 16, &p->prefix) < 0 ||
+       if (nla_put_in6_addr(skb, IFAL_ADDRESS, &p->prefix) < 0 ||
            nla_put_u32(skb, IFAL_LABEL, p->label) < 0) {
                nlmsg_cancel(skb, nlh);
                return -EMSGSIZE;
index e8c4400f23e9b4afe47fcc45bb761d82354fb6a6..eef63b394c5ab9ae2e2b5c060a9960f62d671a3a 100644 (file)
@@ -164,11 +164,11 @@ lookup_protocol:
        answer_flags = answer->flags;
        rcu_read_unlock();
 
-       WARN_ON(answer_prot->slab == NULL);
+       WARN_ON(!answer_prot->slab);
 
        err = -ENOBUFS;
        sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot);
-       if (sk == NULL)
+       if (!sk)
                goto out;
 
        sock_init_data(sock, sk);
@@ -391,7 +391,7 @@ int inet6_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
-       if (sk == NULL)
+       if (!sk)
                return -EINVAL;
 
        /* Free mc lists */
@@ -413,11 +413,11 @@ void inet6_destroy_sock(struct sock *sk)
        /* Release rx options */
 
        skb = xchg(&np->pktoptions, NULL);
-       if (skb != NULL)
+       if (skb)
                kfree_skb(skb);
 
        skb = xchg(&np->rxpmtu, NULL);
-       if (skb != NULL)
+       if (skb)
                kfree_skb(skb);
 
        /* Free flowlabels */
@@ -426,7 +426,7 @@ void inet6_destroy_sock(struct sock *sk)
        /* Free tx options */
 
        opt = xchg(&np->opt, NULL);
-       if (opt != NULL)
+       if (opt)
                sock_kfree_s(sk, opt, opt->tot_len);
 }
 EXPORT_SYMBOL_GPL(inet6_destroy_sock);
@@ -640,7 +640,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
 
        dst = __sk_dst_check(sk, np->dst_cookie);
 
-       if (dst == NULL) {
+       if (!dst) {
                struct inet_sock *inet = inet_sk(sk);
                struct in6_addr *final_p, final;
                struct flowi6 fl6;
@@ -766,6 +766,8 @@ static int __net_init inet6_net_init(struct net *net)
        net->ipv6.sysctl.icmpv6_time = 1*HZ;
        net->ipv6.sysctl.flowlabel_consistency = 1;
        net->ipv6.sysctl.auto_flowlabels = 0;
+       net->ipv6.sysctl.idgen_retries = 3;
+       net->ipv6.sysctl.idgen_delay = 1 * HZ;
        atomic_set(&net->ipv6.fib6_sernum, 1);
 
        err = ipv6_init_mibs(net);
@@ -824,7 +826,7 @@ static int __init inet6_init(void)
        struct list_head *r;
        int err = 0;
 
-       BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
+       sock_skb_cb_check_size(sizeof(struct inet6_skb_parm));
 
        /* Register the socket-side information for inet6_create.  */
        for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
index a6727add26240e9980ce8da620a6e5bf5cddaf8d..ed7d4e3f9c108961fed9ae3144328cc797fed3f9 100644 (file)
@@ -681,7 +681,7 @@ static int ah6_init_state(struct xfrm_state *x)
                goto error;
 
        ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
-       if (ahp == NULL)
+       if (!ahp)
                return -ENOMEM;
 
        ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
index baf2742d1ec47ee1b9cf62d087afb8b7d7195921..514ac259f5433a43fc3cbfcaeb45847122594e79 100644 (file)
@@ -60,6 +60,8 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        int     ishost = !net->ipv6.devconf_all->forwarding;
        int     err = 0;
 
+       ASSERT_RTNL();
+
        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
        if (ipv6_addr_is_multicast(addr))
@@ -68,12 +70,11 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
                return -EINVAL;
 
        pac = sock_kmalloc(sk, sizeof(struct ipv6_ac_socklist), GFP_KERNEL);
-       if (pac == NULL)
+       if (!pac)
                return -ENOMEM;
        pac->acl_next = NULL;
        pac->acl_addr = *addr;
 
-       rtnl_lock();
        if (ifindex == 0) {
                struct rt6_info *rt;
 
@@ -92,7 +93,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        } else
                dev = __dev_get_by_index(net, ifindex);
 
-       if (dev == NULL) {
+       if (!dev) {
                err = -ENODEV;
                goto error;
        }
@@ -130,7 +131,6 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        }
 
 error:
-       rtnl_unlock();
        if (pac)
                sock_kfree_s(sk, pac, sizeof(*pac));
        return err;
@@ -146,7 +146,8 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
        struct ipv6_ac_socklist *pac, *prev_pac;
        struct net *net = sock_net(sk);
 
-       rtnl_lock();
+       ASSERT_RTNL();
+
        prev_pac = NULL;
        for (pac = np->ipv6_ac_list; pac; pac = pac->acl_next) {
                if ((ifindex == 0 || pac->acl_ifindex == ifindex) &&
@@ -154,10 +155,8 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
                        break;
                prev_pac = pac;
        }
-       if (!pac) {
-               rtnl_unlock();
+       if (!pac)
                return -ENOENT;
-       }
        if (prev_pac)
                prev_pac->acl_next = pac->acl_next;
        else
@@ -166,7 +165,6 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
        dev = __dev_get_by_index(net, pac->acl_ifindex);
        if (dev)
                ipv6_dev_ac_dec(dev, &pac->acl_addr);
-       rtnl_unlock();
 
        sock_kfree_s(sk, pac, sizeof(*pac));
        return 0;
@@ -224,7 +222,7 @@ static struct ifacaddr6 *aca_alloc(struct rt6_info *rt,
        struct ifacaddr6 *aca;
 
        aca = kzalloc(sizeof(*aca), GFP_ATOMIC);
-       if (aca == NULL)
+       if (!aca)
                return NULL;
 
        aca->aca_addr = *addr;
@@ -270,7 +268,7 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
                goto out;
        }
        aca = aca_alloc(rt, addr);
-       if (aca == NULL) {
+       if (!aca) {
                ip6_rt_put(rt);
                err = -ENOMEM;
                goto out;
@@ -339,7 +337,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
 {
        struct inet6_dev *idev = __in6_dev_get(dev);
 
-       if (idev == NULL)
+       if (!idev)
                return -ENODEV;
        return __ipv6_dev_ac_dec(idev, addr);
 }
index ace8daca5c8361ad37073a4eeb0f8d55c622d807..762a58c772b81f31841ebe9ae6e14deed57337f0 100644 (file)
@@ -71,7 +71,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
                if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-                       if (flowlabel == NULL)
+                       if (!flowlabel)
                                return -EINVAL;
                }
        }
@@ -373,7 +373,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
        err = -EAGAIN;
        skb = sock_dequeue_err_skb(sk);
-       if (skb == NULL)
+       if (!skb)
                goto out;
 
        copied = skb->len;
@@ -463,7 +463,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
 
        err = -EAGAIN;
        skb = xchg(&np->rxpmtu, NULL);
-       if (skb == NULL)
+       if (!skb)
                goto out;
 
        copied = skb->len;
index e48f2c7c5c596b0a364636b7ed6d6e693e9e925f..31f1b5d5e2ef8f7056eb8eddd513ba5b3343e2b1 100644 (file)
@@ -495,7 +495,7 @@ static int esp_init_authenc(struct xfrm_state *x)
        int err;
 
        err = -EINVAL;
-       if (x->ealg == NULL)
+       if (!x->ealg)
                goto error;
 
        err = -ENAMETOOLONG;
index 8af3eb57f4380fd7de7497ff98f40c88f2040e50..5c5d23e59da598995ff962d069d1e7b6886e31d6 100644 (file)
@@ -82,7 +82,7 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
                if (nexthdr == NEXTHDR_NONE)
                        return -1;
                hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
-               if (hp == NULL)
+               if (!hp)
                        return -1;
                if (nexthdr == NEXTHDR_FRAGMENT) {
                        __be16 _frag_off, *fp;
@@ -91,7 +91,7 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
                                                               frag_off),
                                                sizeof(_frag_off),
                                                &_frag_off);
-                       if (fp == NULL)
+                       if (!fp)
                                return -1;
 
                        *frag_offp = *fp;
@@ -218,7 +218,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
                }
 
                hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
-               if (hp == NULL)
+               if (!hp)
                        return -EBADMSG;
 
                if (nexthdr == NEXTHDR_ROUTING) {
@@ -226,7 +226,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
 
                        rh = skb_header_pointer(skb, start, sizeof(_rh),
                                                &_rh);
-                       if (rh == NULL)
+                       if (!rh)
                                return -EBADMSG;
 
                        if (flags && (*flags & IP6_FH_F_SKIP_RH) &&
@@ -245,7 +245,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
                                                               frag_off),
                                                sizeof(_frag_off),
                                                &_frag_off);
-                       if (fp == NULL)
+                       if (!fp)
                                return -EBADMSG;
 
                        _frag_off = ntohs(*fp) & ~0x7;
index 27ca79682efbf681a0ab6073f50f8fa73214028e..2367a16eae58a31e01aa0d1d676090b688102593 100644 (file)
@@ -199,12 +199,10 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
        }
 
        if (frh->src_len)
-               nla_memcpy(&rule6->src.addr, tb[FRA_SRC],
-                          sizeof(struct in6_addr));
+               rule6->src.addr = nla_get_in6_addr(tb[FRA_SRC]);
 
        if (frh->dst_len)
-               nla_memcpy(&rule6->dst.addr, tb[FRA_DST],
-                          sizeof(struct in6_addr));
+               rule6->dst.addr = nla_get_in6_addr(tb[FRA_DST]);
 
        rule6->src.plen = frh->src_len;
        rule6->dst.plen = frh->dst_len;
@@ -250,11 +248,9 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
        frh->tos = rule6->tclass;
 
        if ((rule6->dst.plen &&
-            nla_put(skb, FRA_DST, sizeof(struct in6_addr),
-                    &rule6->dst.addr)) ||
+            nla_put_in6_addr(skb, FRA_DST, &rule6->dst.addr)) ||
            (rule6->src.plen &&
-            nla_put(skb, FRA_SRC, sizeof(struct in6_addr),
-                    &rule6->src.addr)))
+            nla_put_in6_addr(skb, FRA_SRC, &rule6->src.addr)))
                goto nla_put_failure;
        return 0;
 
@@ -299,19 +295,16 @@ static int __net_init fib6_rules_net_init(struct net *net)
        ops = fib_rules_register(&fib6_rules_ops_template, net);
        if (IS_ERR(ops))
                return PTR_ERR(ops);
-       net->ipv6.fib6_rules_ops = ops;
-
 
-       err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0,
-                                  RT6_TABLE_LOCAL, 0);
+       err = fib_default_rule_add(ops, 0, RT6_TABLE_LOCAL, 0);
        if (err)
                goto out_fib6_rules_ops;
 
-       err = fib_default_rule_add(net->ipv6.fib6_rules_ops,
-                                  0x7FFE, RT6_TABLE_MAIN, 0);
+       err = fib_default_rule_add(ops, 0x7FFE, RT6_TABLE_MAIN, 0);
        if (err)
                goto out_fib6_rules_ops;
 
+       net->ipv6.fib6_rules_ops = ops;
 out:
        return err;
 
@@ -322,7 +315,9 @@ out_fib6_rules_ops:
 
 static void __net_exit fib6_rules_net_exit(struct net *net)
 {
+       rtnl_lock();
        fib_rules_unregister(net->ipv6.fib6_rules_ops);
+       rtnl_unlock();
 }
 
 static struct pernet_operations fib6_rules_net_ops = {
index a5e95199585ecb55bf63bbf02d2d34cacfbcca48..2c2b5d51f15cd24b7c1a5b9a416bbfd33201ec06 100644 (file)
@@ -160,8 +160,7 @@ static bool is_ineligible(const struct sk_buff *skb)
                tp = skb_header_pointer(skb,
                        ptr+offsetof(struct icmp6hdr, icmp6_type),
                        sizeof(_type), &_type);
-               if (tp == NULL ||
-                   !(*tp & ICMPV6_INFOMSG_MASK))
+               if (!tp || !(*tp & ICMPV6_INFOMSG_MASK))
                        return true;
        }
        return false;
@@ -231,7 +230,7 @@ static bool opt_unrec(struct sk_buff *skb, __u32 offset)
 
        offset += skb_network_offset(skb);
        op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
-       if (op == NULL)
+       if (!op)
                return true;
        return (*op & 0xC0) == 0x80;
 }
@@ -244,7 +243,7 @@ int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
        int err = 0;
 
        skb = skb_peek(&sk->sk_write_queue);
-       if (skb == NULL)
+       if (!skb)
                goto out;
 
        icmp6h = icmp6_hdr(skb);
@@ -479,7 +478,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
        sk = icmpv6_xmit_lock(net);
-       if (sk == NULL)
+       if (!sk)
                return;
        sk->sk_mark = mark;
        np = inet6_sk(sk);
@@ -582,7 +581,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
        sk = icmpv6_xmit_lock(net);
-       if (sk == NULL)
+       if (!sk)
                return;
        sk->sk_mark = mark;
        np = inet6_sk(sk);
@@ -839,7 +838,7 @@ static int __net_init icmpv6_sk_init(struct net *net)
 
        net->ipv6.icmp_sk =
                kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
-       if (net->ipv6.icmp_sk == NULL)
+       if (!net->ipv6.icmp_sk)
                return -ENOMEM;
 
        for_each_possible_cpu(i) {
index 29b32206e49488e1155900adfcd1707ea909855e..6927f3fb5597fd2013b885cddb35bed852b950d5 100644 (file)
@@ -112,22 +112,20 @@ static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
        return c & (synq_hsize - 1);
 }
 
-struct request_sock *inet6_csk_search_req(const struct sock *sk,
-                                         struct request_sock ***prevp,
+struct request_sock *inet6_csk_search_req(struct sock *sk,
                                          const __be16 rport,
                                          const struct in6_addr *raddr,
                                          const struct in6_addr *laddr,
                                          const int iif)
 {
-       const struct inet_connection_sock *icsk = inet_csk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-       struct request_sock *req, **prev;
+       struct request_sock *req;
+       u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
+                                  lopt->nr_table_entries);
 
-       for (prev = &lopt->syn_table[inet6_synq_hash(raddr, rport,
-                                                    lopt->hash_rnd,
-                                                    lopt->nr_table_entries)];
-            (req = *prev) != NULL;
-            prev = &req->dl_next) {
+       spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+       for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);
 
                if (ireq->ir_rmt_port == rport &&
@@ -135,13 +133,14 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk,
                    ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
                    ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
                    (!ireq->ir_iif || ireq->ir_iif == iif)) {
+                       atomic_inc(&req->rsk_refcnt);
                        WARN_ON(req->sk != NULL);
-                       *prevp = prev;
-                       return req;
+                       break;
                }
        }
+       spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
-       return NULL;
+       return req;
 }
 EXPORT_SYMBOL_GPL(inet6_csk_search_req);
 
index 051dffb49c90e979ca6cc58dd1c2cd2fe9b1cc4c..033f17816ef4cf482d40eb496129a7d832c0a251 100644 (file)
 #include <net/secure_seq.h>
 #include <net/ip.h>
 
-static unsigned int inet6_ehashfn(struct net *net,
-                                 const struct in6_addr *laddr,
-                                 const u16 lport,
-                                 const struct in6_addr *faddr,
-                                 const __be16 fport)
+u32 inet6_ehashfn(const struct net *net,
+                 const struct in6_addr *laddr, const u16 lport,
+                 const struct in6_addr *faddr, const __be16 fport)
 {
        static u32 inet6_ehash_secret __read_mostly;
        static u32 ipv6_hash_secret __read_mostly;
@@ -44,54 +42,6 @@ static unsigned int inet6_ehashfn(struct net *net,
                               inet6_ehash_secret + net_hash_mix(net));
 }
 
-static int inet6_sk_ehashfn(const struct sock *sk)
-{
-       const struct inet_sock *inet = inet_sk(sk);
-       const struct in6_addr *laddr = &sk->sk_v6_rcv_saddr;
-       const struct in6_addr *faddr = &sk->sk_v6_daddr;
-       const __u16 lport = inet->inet_num;
-       const __be16 fport = inet->inet_dport;
-       struct net *net = sock_net(sk);
-
-       return inet6_ehashfn(net, laddr, lport, faddr, fport);
-}
-
-int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
-{
-       struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
-       int twrefcnt = 0;
-
-       WARN_ON(!sk_unhashed(sk));
-
-       if (sk->sk_state == TCP_LISTEN) {
-               struct inet_listen_hashbucket *ilb;
-
-               ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
-               spin_lock(&ilb->lock);
-               __sk_nulls_add_node_rcu(sk, &ilb->head);
-               spin_unlock(&ilb->lock);
-       } else {
-               unsigned int hash;
-               struct hlist_nulls_head *list;
-               spinlock_t *lock;
-
-               sk->sk_hash = hash = inet6_sk_ehashfn(sk);
-               list = &inet_ehash_bucket(hashinfo, hash)->chain;
-               lock = inet_ehash_lockp(hashinfo, hash);
-               spin_lock(lock);
-               __sk_nulls_add_node_rcu(sk, list);
-               if (tw) {
-                       WARN_ON(sk->sk_hash != tw->tw_hash);
-                       twrefcnt = inet_twsk_unhash(tw);
-               }
-               spin_unlock(lock);
-       }
-
-       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-       return twrefcnt;
-}
-EXPORT_SYMBOL(__inet6_hash);
-
 /*
  * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
  * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
@@ -320,6 +270,6 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
                       struct sock *sk)
 {
        return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk),
-                       __inet6_check_established, __inet6_hash);
+                                  __inet6_check_established);
 }
 EXPORT_SYMBOL_GPL(inet6_hash_connect);
index 263ef4143bff8fec124ab3087dde78164b5684f3..96dbffff5a2400bfca0a7b0bee9072d76ec92e88 100644 (file)
@@ -1206,7 +1206,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
 
                WARN_ON(fn->fn_flags & RTN_RTINFO);
                WARN_ON(fn->fn_flags & RTN_TL_ROOT);
-               WARN_ON(fn->leaf != NULL);
+               WARN_ON(fn->leaf);
 
                children = 0;
                child = NULL;
@@ -1361,7 +1361,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
 
 #if RT6_DEBUG >= 2
        if (rt->dst.obsolete > 0) {
-               WARN_ON(fn != NULL);
+               WARN_ON(fn);
                return -ENOENT;
        }
 #endif
index f45d6db50a454727367d2fc2450fd6f0b1dfb923..d491125011c4d1c47fd92180efb1cf2e22a85e22 100644 (file)
@@ -100,7 +100,6 @@ static void fl_free(struct ip6_flowlabel *fl)
        if (fl) {
                if (fl->share == IPV6_FL_S_PROCESS)
                        put_pid(fl->owner.pid);
-               release_net(fl->fl_net);
                kfree(fl->opt);
                kfree_rcu(fl, rcu);
        }
@@ -206,7 +205,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
                        fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
                        if (fl->label) {
                                lfl = __fl_lookup(net, fl->label);
-                               if (lfl == NULL)
+                               if (!lfl)
                                        break;
                        }
                }
@@ -220,7 +219,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
                 * with the same label can only appear on another sock
                 */
                lfl = __fl_lookup(net, fl->label);
-               if (lfl != NULL) {
+               if (lfl) {
                        atomic_inc(&lfl->users);
                        spin_unlock_bh(&ip6_fl_lock);
                        return lfl;
@@ -298,10 +297,10 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
 {
        struct ipv6_txoptions *fl_opt = fl->opt;
 
-       if (fopt == NULL || fopt->opt_flen == 0)
+       if (!fopt || fopt->opt_flen == 0)
                return fl_opt;
 
-       if (fl_opt != NULL) {
+       if (fl_opt) {
                opt_space->hopopt = fl_opt->hopopt;
                opt_space->dst0opt = fl_opt->dst0opt;
                opt_space->srcrt = fl_opt->srcrt;
@@ -367,7 +366,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
 
        err = -ENOMEM;
        fl = kzalloc(sizeof(*fl), GFP_KERNEL);
-       if (fl == NULL)
+       if (!fl)
                goto done;
 
        if (olen > 0) {
@@ -377,7 +376,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
 
                err = -ENOMEM;
                fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
-               if (fl->opt == NULL)
+               if (!fl->opt)
                        goto done;
 
                memset(fl->opt, 0, sizeof(*fl->opt));
@@ -403,7 +402,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                }
        }
 
-       fl->fl_net = hold_net(net);
+       fl->fl_net = net;
        fl->expires = jiffies;
        err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
        if (err)
@@ -597,7 +596,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
                        return -EINVAL;
 
                fl = fl_create(net, sk, &freq, optval, optlen, &err);
-               if (fl == NULL)
+               if (!fl)
                        return err;
                sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
 
@@ -617,7 +616,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
                        }
                        rcu_read_unlock_bh();
 
-                       if (fl1 == NULL)
+                       if (!fl1)
                                fl1 = fl_lookup(net, freq.flr_label);
                        if (fl1) {
 recheck:
@@ -634,7 +633,7 @@ recheck:
                                        goto release;
 
                                err = -ENOMEM;
-                               if (sfl1 == NULL)
+                               if (!sfl1)
                                        goto release;
                                if (fl->linger > fl1->linger)
                                        fl1->linger = fl->linger;
@@ -654,7 +653,7 @@ release:
                        goto done;
 
                err = -ENOMEM;
-               if (sfl1 == NULL)
+               if (!sfl1)
                        goto done;
 
                err = mem_check(sk);
@@ -662,7 +661,7 @@ release:
                        goto done;
 
                fl1 = fl_intern(net, fl, freq.flr_label);
-               if (fl1 != NULL)
+               if (fl1)
                        goto recheck;
 
                if (!freq.flr_label) {
index bc28b7d42a6dab05abee80d4fa84c102d92ca91f..f724329d7436430a532dd70c8c047bf0dd881642 100644 (file)
@@ -223,7 +223,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
                }
        }
 
-       if (cand != NULL)
+       if (cand)
                return cand;
 
        dev = ign->fb_tunnel_dev;
@@ -395,7 +395,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                                flags & GRE_KEY ?
                                *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
                                p[1]);
-       if (t == NULL)
+       if (!t)
                return;
 
        switch (type) {
@@ -980,7 +980,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
                                                 &p->raddr, &p->laddr,
                                                 p->link, strict);
 
-               if (rt == NULL)
+               if (!rt)
                        return;
 
                if (rt->dst.dev) {
@@ -1073,7 +1073,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
                        }
                        ip6gre_tnl_parm_from_user(&p1, &p);
                        t = ip6gre_tunnel_locate(net, &p1, 0);
-                       if (t == NULL)
+                       if (!t)
                                t = netdev_priv(dev);
                }
                memset(&p, 0, sizeof(p));
@@ -1105,7 +1105,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
                t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
 
                if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
-                       if (t != NULL) {
+                       if (t) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
@@ -1144,7 +1144,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
                        err = -ENOENT;
                        ip6gre_tnl_parm_from_user(&p1, &p);
                        t = ip6gre_tunnel_locate(net, &p1, 0);
-                       if (t == NULL)
+                       if (!t)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(ign->fb_tunnel_dev))
@@ -1216,6 +1216,7 @@ static const struct net_device_ops ip6gre_netdev_ops = {
        .ndo_do_ioctl           = ip6gre_tunnel_ioctl,
        .ndo_change_mtu         = ip6gre_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
+       .ndo_get_iflink         = ip6_tnl_get_iflink,
 };
 
 static void ip6gre_dev_free(struct net_device *dev)
@@ -1238,7 +1239,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                dev->mtu -= 8;
        dev->flags |= IFF_NOARP;
-       dev->iflink = 0;
        dev->addr_len = sizeof(struct in6_addr);
        netif_keep_dst(dev);
 }
@@ -1270,8 +1270,6 @@ static int ip6gre_tunnel_init(struct net_device *dev)
                u64_stats_init(&ip6gre_tunnel_stats->syncp);
        }
 
-       dev->iflink = tunnel->parms.link;
-
        return 0;
 }
 
@@ -1313,7 +1311,7 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
 
                        t = rtnl_dereference(ign->tunnels[prio][h]);
 
-                       while (t != NULL) {
+                       while (t) {
                                /* If dev is in the same netns, it has already
                                 * been added to the list by the previous loop.
                                 */
@@ -1412,7 +1410,7 @@ static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
                goto out;
 
        if (data[IFLA_GRE_REMOTE]) {
-               nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+               daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
                if (ipv6_addr_any(&daddr))
                        return -EINVAL;
        }
@@ -1446,10 +1444,10 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
                parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
 
        if (data[IFLA_GRE_LOCAL])
-               nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr));
+               parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);
 
        if (data[IFLA_GRE_REMOTE])
-               nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+               parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
 
        if (data[IFLA_GRE_TTL])
                parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
@@ -1480,8 +1478,6 @@ static int ip6gre_tap_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
-       dev->iflink = tunnel->parms.link;
-
        return 0;
 }
 
@@ -1493,6 +1489,7 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = ip6gre_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
+       .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
 static void ip6gre_tap_setup(struct net_device *dev)
@@ -1503,7 +1500,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
        dev->netdev_ops = &ip6gre_tap_netdev_ops;
        dev->destructor = ip6gre_dev_free;
 
-       dev->iflink = 0;
        dev->features |= NETIF_F_NETNS_LOCAL;
 }
 
@@ -1622,8 +1618,8 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
            nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
-           nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
-           nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
+           nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
+           nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
            nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
            /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
            nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
index aacdcb4dc7624117561ce9720fa4734ed06ce5a0..fb97f7f8d4ed11497e088d063fbe8d78e80d0ed0 100644 (file)
@@ -221,7 +221,7 @@ resubmit:
 
        raw = raw6_local_deliver(skb, nexthdr);
        ipprot = rcu_dereference(inet6_protos[nexthdr]);
-       if (ipprot != NULL) {
+       if (ipprot) {
                int ret;
 
                if (ipprot->flags & INET6_PROTO_FINAL) {
index 46d452a56d3e1ce9b1a0f9f8d8edfede3675bf21..e893cd18612fcdc9e8577f0e060ffd32b5659eea 100644 (file)
@@ -124,7 +124,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
                        unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
                        fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
                        fptr->frag_off = htons(offset);
-                       if (skb->next != NULL)
+                       if (skb->next)
                                fptr->frag_off |= htons(IP6_MF);
                        offset += (ntohs(ipv6h->payload_len) -
                                   sizeof(struct frag_hdr));
index 7e80b61b51ff474db6c188218b70f12709209256..654f245aa93051c03bfe69c7a9c1ed83c50c527d 100644 (file)
@@ -177,7 +177,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 
                if (skb_headroom(skb) < head_room) {
                        struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
-                       if (skb2 == NULL) {
+                       if (!skb2) {
                                IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                              IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
@@ -542,7 +542,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
        struct sk_buff *frag;
        struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
-       struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
+       struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+                               inet6_sk(skb->sk) : NULL;
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
        unsigned int mtu, hlen, left, len;
@@ -628,7 +629,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                skb_reset_network_header(skb);
                memcpy(skb_network_header(skb), tmp_hdr, hlen);
 
-               ipv6_select_ident(fh, rt);
+               ipv6_select_ident(net, fh, rt);
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
                fh->frag_off = htons(IP6_MF);
@@ -657,7 +658,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                                fh->nexthdr = nexthdr;
                                fh->reserved = 0;
                                fh->frag_off = htons(offset);
-                               if (frag->next != NULL)
+                               if (frag->next)
                                        fh->frag_off |= htons(IP6_MF);
                                fh->identification = frag_id;
                                ipv6_hdr(frag)->payload_len =
@@ -775,7 +776,7 @@ slow_path:
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
                if (!frag_id) {
-                       ipv6_select_ident(fh, rt);
+                       ipv6_select_ident(net, fh, rt);
                        frag_id = fh->identification;
                } else
                        fh->identification = frag_id;
@@ -823,7 +824,7 @@ static inline int ip6_rt_check(const struct rt6key *rt_key,
                               const struct in6_addr *addr_cache)
 {
        return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
-               (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
+               (!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
 }
 
 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
@@ -882,7 +883,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
 #endif
        int err;
 
-       if (*dst == NULL)
+       if (!*dst)
                *dst = ip6_route_output(net, sk, fl6);
 
        err = (*dst)->error;
@@ -1045,11 +1046,11 @@ static inline int ip6_ufo_append_data(struct sock *sk,
         * udp datagram
         */
        skb = skb_peek_tail(queue);
-       if (skb == NULL) {
+       if (!skb) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
-               if (skb == NULL)
+               if (!skb)
                        return err;
 
                /* reserve space for Hardware header */
@@ -1079,7 +1080,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
        skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
                                     sizeof(struct frag_hdr)) & ~7;
        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-       ipv6_select_ident(&fhdr, rt);
+       ipv6_select_ident(sock_net(sk), &fhdr, rt);
        skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
 
 append:
@@ -1107,7 +1108,7 @@ static void ip6_append_data_mtu(unsigned int *mtu,
                                unsigned int orig_mtu)
 {
        if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
-               if (skb == NULL) {
+               if (!skb) {
                        /* first fragment, reserve header_len */
                        *mtu = orig_mtu - rt->dst.header_len;
 
@@ -1139,7 +1140,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
                        return -EINVAL;
 
                v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
-               if (unlikely(v6_cork->opt == NULL))
+               if (unlikely(!v6_cork->opt))
                        return -ENOBUFS;
 
                v6_cork->opt->tot_len = opt->tot_len;
@@ -1331,7 +1332,7 @@ alloc_new_skb:
                        else
                                fraggap = 0;
                        /* update mtu and maxfraglen if necessary */
-                       if (skb == NULL || skb_prev == NULL)
+                       if (!skb || !skb_prev)
                                ip6_append_data_mtu(&mtu, &maxfraglen,
                                                    fragheaderlen, skb, rt,
                                                    orig_mtu);
@@ -1383,10 +1384,10 @@ alloc_new_skb:
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len, 1,
                                                           sk->sk_allocation);
-                               if (unlikely(skb == NULL))
+                               if (unlikely(!skb))
                                        err = -ENOBUFS;
                        }
-                       if (skb == NULL)
+                       if (!skb)
                                goto error;
                        /*
                         *      Fill in the control structures
@@ -1578,7 +1579,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
        unsigned char proto = fl6->flowi6_proto;
 
        skb = __skb_dequeue(queue);
-       if (skb == NULL)
+       if (!skb)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);
 
index ddd94eca19b3986e4fc0b1ff684eee401032815e..b6a211a150b27e4b28eaa873e1edca204f0c03fd 100644 (file)
@@ -64,12 +64,6 @@ MODULE_LICENSE("GPL");
 MODULE_ALIAS_RTNL_LINK("ip6tnl");
 MODULE_ALIAS_NETDEV("ip6tnl0");
 
-#ifdef IP6_TNL_DEBUG
-#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__)
-#else
-#define IP6_TNL_TRACE(x...) do {;} while(0)
-#endif
-
 #define HASH_SIZE_SHIFT  5
 #define HASH_SIZE (1 << HASH_SIZE_SHIFT)
 
@@ -137,7 +131,7 @@ struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
        struct dst_entry *dst = t->dst_cache;
 
        if (dst && dst->obsolete &&
-           dst->ops->check(dst, t->dst_cookie) == NULL) {
+           !dst->ops->check(dst, t->dst_cookie)) {
                t->dst_cache = NULL;
                dst_release(dst);
                return NULL;
@@ -331,7 +325,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 
        dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
                           ip6_tnl_dev_setup);
-       if (dev == NULL)
+       if (!dev)
                goto failed;
 
        dev_net_set(dev, net);
@@ -502,7 +496,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
 
        rcu_read_lock();
        t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
-       if (t == NULL)
+       if (!t)
                goto out;
 
        tproto = ACCESS_ONCE(t->parms.proto);
@@ -813,7 +807,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 
        rcu_read_lock();
        t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
-       if (t != NULL) {
+       if (t) {
                struct pcpu_sw_netstats *tstats;
 
                tproto = ACCESS_ONCE(t->parms.proto);
@@ -1270,8 +1264,6 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
        else
                dev->flags &= ~IFF_POINTOPOINT;
 
-       dev->iflink = p->link;
-
        if (p->flags & IP6_TNL_F_CAP_XMIT) {
                int strict = (ipv6_addr_type(&p->raddr) &
                              (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
@@ -1280,7 +1272,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
                                                 &p->raddr, &p->laddr,
                                                 p->link, strict);
 
-               if (rt == NULL)
+               if (!rt)
                        return;
 
                if (rt->dst.dev) {
@@ -1523,6 +1515,13 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
+int ip6_tnl_get_iflink(const struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+
+       return t->parms.link;
+}
+EXPORT_SYMBOL(ip6_tnl_get_iflink);
 
 static const struct net_device_ops ip6_tnl_netdev_ops = {
        .ndo_init       = ip6_tnl_dev_init,
@@ -1531,6 +1530,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
        .ndo_do_ioctl   = ip6_tnl_ioctl,
        .ndo_change_mtu = ip6_tnl_change_mtu,
        .ndo_get_stats  = ip6_get_stats,
+       .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
 
@@ -1646,12 +1646,10 @@ static void ip6_tnl_netlink_parms(struct nlattr *data[],
                parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
 
        if (data[IFLA_IPTUN_LOCAL])
-               nla_memcpy(&parms->laddr, data[IFLA_IPTUN_LOCAL],
-                          sizeof(struct in6_addr));
+               parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
 
        if (data[IFLA_IPTUN_REMOTE])
-               nla_memcpy(&parms->raddr, data[IFLA_IPTUN_REMOTE],
-                          sizeof(struct in6_addr));
+               parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
 
        if (data[IFLA_IPTUN_TTL])
                parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
@@ -1745,10 +1743,8 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct __ip6_tnl_parm *parm = &tunnel->parms;
 
        if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
-           nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr),
-                   &parm->laddr) ||
-           nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
-                   &parm->raddr) ||
+           nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
+           nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
            nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
            nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
            nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
@@ -1821,7 +1817,7 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
 
        for (h = 0; h < HASH_SIZE; h++) {
                t = rtnl_dereference(ip6n->tnls_r_l[h]);
-               while (t != NULL) {
+               while (t) {
                        /* If dev is in the same netns, it has already
                         * been added to the list by the previous loop.
                         */
index 5fb9e212eca8d0629bf9dc8b4677565c965b679f..b53148444e157f821c86b467b166fc9ce7bd5ccb 100644 (file)
@@ -218,7 +218,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
                sprintf(name, "ip6_vti%%d");
 
        dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
-       if (dev == NULL)
+       if (!dev)
                goto failed;
 
        dev_net_set(dev, net);
@@ -305,7 +305,7 @@ static int vti6_rcv(struct sk_buff *skb)
 
        rcu_read_lock();
        t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
-       if (t != NULL) {
+       if (t) {
                if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
                        rcu_read_unlock();
                        goto discard;
@@ -601,8 +601,6 @@ static void vti6_link_config(struct ip6_tnl *t)
                dev->flags |= IFF_POINTOPOINT;
        else
                dev->flags &= ~IFF_POINTOPOINT;
-
-       dev->iflink = p->link;
 }
 
 /**
@@ -716,7 +714,7 @@ vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                } else {
                        memset(&p, 0, sizeof(p));
                }
-               if (t == NULL)
+               if (!t)
                        t = netdev_priv(dev);
                vti6_parm_to_user(&p, &t->parms);
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
@@ -736,7 +734,7 @@ vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                vti6_parm_from_user(&p1, &p);
                t = vti6_locate(net, &p1, cmd == SIOCADDTUNNEL);
                if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
-                       if (t != NULL) {
+                       if (t) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
@@ -767,7 +765,7 @@ vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        err = -ENOENT;
                        vti6_parm_from_user(&p1, &p);
                        t = vti6_locate(net, &p1, 0);
-                       if (t == NULL)
+                       if (!t)
                                break;
                        err = -EPERM;
                        if (t->dev == ip6n->fb_tnl_dev)
@@ -808,6 +806,7 @@ static const struct net_device_ops vti6_netdev_ops = {
        .ndo_do_ioctl   = vti6_ioctl,
        .ndo_change_mtu = vti6_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
+       .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
 /**
@@ -897,12 +896,10 @@ static void vti6_netlink_parms(struct nlattr *data[],
                parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
 
        if (data[IFLA_VTI_LOCAL])
-               nla_memcpy(&parms->laddr, data[IFLA_VTI_LOCAL],
-                          sizeof(struct in6_addr));
+               parms->laddr = nla_get_in6_addr(data[IFLA_VTI_LOCAL]);
 
        if (data[IFLA_VTI_REMOTE])
-               nla_memcpy(&parms->raddr, data[IFLA_VTI_REMOTE],
-                          sizeof(struct in6_addr));
+               parms->raddr = nla_get_in6_addr(data[IFLA_VTI_REMOTE]);
 
        if (data[IFLA_VTI_IKEY])
                parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);
@@ -983,10 +980,8 @@ static int vti6_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct __ip6_tnl_parm *parm = &tunnel->parms;
 
        if (nla_put_u32(skb, IFLA_VTI_LINK, parm->link) ||
-           nla_put(skb, IFLA_VTI_LOCAL, sizeof(struct in6_addr),
-                   &parm->laddr) ||
-           nla_put(skb, IFLA_VTI_REMOTE, sizeof(struct in6_addr),
-                   &parm->raddr) ||
+           nla_put_in6_addr(skb, IFLA_VTI_LOCAL, &parm->laddr) ||
+           nla_put_in6_addr(skb, IFLA_VTI_REMOTE, &parm->raddr) ||
            nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) ||
            nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key))
                goto nla_put_failure;
@@ -1027,7 +1022,7 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
 
        for (h = 0; h < HASH_SIZE; h++) {
                t = rtnl_dereference(ip6n->tnls_r_l[h]);
-               while (t != NULL) {
+               while (t) {
                        unregister_netdevice_queue(t->dev, &list);
                        t = rtnl_dereference(t->next);
                }
index 52028f449a892d9457314799e6767eef5a401824..8493a22e74eb8a7f65b4b4177ece059d2f1c3060 100644 (file)
@@ -56,9 +56,7 @@
 
 struct mr6_table {
        struct list_head        list;
-#ifdef CONFIG_NET_NS
-       struct net              *net;
-#endif
+       possible_net_t          net;
        u32                     id;
        struct sock             *mroute6_sk;
        struct timer_list       ipmr_expire_timer;
@@ -175,7 +173,7 @@ static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
        }
 
        mrt = ip6mr_get_table(rule->fr_net, rule->table);
-       if (mrt == NULL)
+       if (!mrt)
                return -EAGAIN;
        res->mrt = mrt;
        return 0;
@@ -239,7 +237,7 @@ static int __net_init ip6mr_rules_init(struct net *net)
        INIT_LIST_HEAD(&net->ipv6.mr6_tables);
 
        mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
-       if (mrt == NULL) {
+       if (!mrt) {
                err = -ENOMEM;
                goto err1;
        }
@@ -267,8 +265,8 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
                list_del(&mrt->list);
                ip6mr_free_table(mrt);
        }
-       rtnl_unlock();
        fib_rules_unregister(net->ipv6.mr6_rules_ops);
+       rtnl_unlock();
 }
 #else
 #define ip6mr_for_each_table(mrt, net) \
@@ -307,11 +305,11 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
        unsigned int i;
 
        mrt = ip6mr_get_table(net, id);
-       if (mrt != NULL)
+       if (mrt)
                return mrt;
 
        mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
-       if (mrt == NULL)
+       if (!mrt)
                return NULL;
        mrt->id = id;
        write_pnet(&mrt->net, net);
@@ -336,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
 
 static void ip6mr_free_table(struct mr6_table *mrt)
 {
-       del_timer(&mrt->ipmr_expire_timer);
+       del_timer_sync(&mrt->ipmr_expire_timer);
        mroute_clean_tables(mrt);
        kfree(mrt);
 }
@@ -410,7 +408,7 @@ static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
        struct mr6_table *mrt;
 
        mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
-       if (mrt == NULL)
+       if (!mrt)
                return ERR_PTR(-ENOENT);
 
        iter->mrt = mrt;
@@ -494,7 +492,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
        struct mr6_table *mrt;
 
        mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
-       if (mrt == NULL)
+       if (!mrt)
                return ERR_PTR(-ENOENT);
 
        it->mrt = mrt;
@@ -667,7 +665,7 @@ static int pim6_rcv(struct sk_buff *skb)
                dev_hold(reg_dev);
        read_unlock(&mrt_lock);
 
-       if (reg_dev == NULL)
+       if (!reg_dev)
                goto drop;
 
        skb->mac_header = skb->network_header;
@@ -720,8 +718,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 }
 
+static int reg_vif_get_iflink(const struct net_device *dev)
+{
+       return 0;
+}
+
 static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
+       .ndo_get_iflink = reg_vif_get_iflink,
 };
 
 static void reg_vif_setup(struct net_device *dev)
@@ -745,7 +749,7 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
                sprintf(name, "pim6reg%u", mrt->id);
 
        dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
-       if (dev == NULL)
+       if (!dev)
                return NULL;
 
        dev_net_set(dev, net);
@@ -754,7 +758,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
                free_netdev(dev);
                return NULL;
        }
-       dev->iflink = 0;
 
        if (dev_open(dev))
                goto failure;
@@ -994,7 +997,7 @@ static int mif6_add(struct net *net, struct mr6_table *mrt,
        v->pkt_out = 0;
        v->link = dev->ifindex;
        if (v->flags & MIFF_REGISTER)
-               v->link = dev->iflink;
+               v->link = dev_get_iflink(dev);
 
        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
@@ -1074,7 +1077,7 @@ skip:
 static struct mfc6_cache *ip6mr_cache_alloc(void)
 {
        struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
-       if (c == NULL)
+       if (!c)
                return NULL;
        c->mfc_un.res.minvif = MAXMIFS;
        return c;
@@ -1083,7 +1086,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
 static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
 {
        struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
-       if (c == NULL)
+       if (!c)
                return NULL;
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10 * HZ;
@@ -1200,7 +1203,7 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
 
-       if (mrt->mroute6_sk == NULL) {
+       if (!mrt->mroute6_sk) {
                kfree_skb(skb);
                return -EINVAL;
        }
@@ -1495,7 +1498,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
                return -EINVAL;
 
        c = ip6mr_cache_alloc();
-       if (c == NULL)
+       if (!c)
                return -ENOMEM;
 
        c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
@@ -1665,7 +1668,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
        struct mr6_table *mrt;
 
        mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        if (optname != MRT6_INIT) {
@@ -1814,7 +1817,7 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
        struct mr6_table *mrt;
 
        mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        switch (optname) {
@@ -1861,7 +1864,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
        struct mr6_table *mrt;
 
        mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        switch (cmd) {
@@ -1935,7 +1938,7 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
        struct mr6_table *mrt;
 
        mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        switch (cmd) {
@@ -2005,7 +2008,7 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
        struct dst_entry *dst;
        struct flowi6 fl6;
 
-       if (vif->dev == NULL)
+       if (!vif->dev)
                goto out_free;
 
 #ifdef CONFIG_IPV6_PIMSM_V2
@@ -2194,7 +2197,7 @@ int ip6_mr_input(struct sk_buff *skb)
        read_lock(&mrt_lock);
        cache = ip6mr_cache_find(mrt,
                                 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
-       if (cache == NULL) {
+       if (!cache) {
                int vif = ip6mr_find_vif(mrt, skb->dev);
 
                if (vif >= 0)
@@ -2206,7 +2209,7 @@ int ip6_mr_input(struct sk_buff *skb)
        /*
         *      No usable cache entry
         */
-       if (cache == NULL) {
+       if (!cache) {
                int vif;
 
                vif = ip6mr_find_vif(mrt, skb->dev);
@@ -2245,13 +2248,13 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
            nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
                return -EMSGSIZE;
        mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
-       if (mp_attr == NULL)
+       if (!mp_attr)
                return -EMSGSIZE;
 
        for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
                if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
                        nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
-                       if (nhp == NULL) {
+                       if (!nhp) {
                                nla_nest_cancel(skb, mp_attr);
                                return -EMSGSIZE;
                        }
@@ -2284,7 +2287,7 @@ int ip6mr_get_route(struct net *net,
        struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
 
        mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
-       if (mrt == NULL)
+       if (!mrt)
                return -ENOENT;
 
        read_lock(&mrt_lock);
@@ -2309,7 +2312,7 @@ int ip6mr_get_route(struct net *net,
                }
 
                dev = skb->dev;
-               if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
+               if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
                        read_unlock(&mrt_lock);
                        return -ENODEV;
                }
@@ -2361,7 +2364,7 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
        int err;
 
        nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        rtm = nlmsg_data(nlh);
@@ -2380,8 +2383,8 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
                rtm->rtm_protocol = RTPROT_MROUTED;
        rtm->rtm_flags    = 0;
 
-       if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
-           nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
+       if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
+           nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
                goto nla_put_failure;
        err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
        /* do not break the dump if cache is unresolved */
@@ -2426,7 +2429,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
 
        skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
                        GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
index 8d766d9100cba408525faf5818b7b0c6b6bc543c..63e6956917c9cf20ee74968de3a7f03c1b48c849 100644 (file)
@@ -85,7 +85,7 @@ int ip6_ra_control(struct sock *sk, int sel)
                        return 0;
                }
        }
-       if (new_ra == NULL) {
+       if (!new_ra) {
                write_unlock_bh(&ip6_ra_lock);
                return -ENOBUFS;
        }
@@ -117,6 +117,25 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
        return opt;
 }
 
+static bool setsockopt_needs_rtnl(int optname)
+{
+       switch (optname) {
+       case IPV6_ADD_MEMBERSHIP:
+       case IPV6_DROP_MEMBERSHIP:
+       case IPV6_JOIN_ANYCAST:
+       case IPV6_LEAVE_ANYCAST:
+       case MCAST_JOIN_GROUP:
+       case MCAST_LEAVE_GROUP:
+       case MCAST_JOIN_SOURCE_GROUP:
+       case MCAST_LEAVE_SOURCE_GROUP:
+       case MCAST_BLOCK_SOURCE:
+       case MCAST_UNBLOCK_SOURCE:
+       case MCAST_MSFILTER:
+               return true;
+       }
+       return false;
+}
+
 static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, unsigned int optlen)
 {
@@ -124,8 +143,9 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
        struct net *net = sock_net(sk);
        int val, valbool;
        int retv = -ENOPROTOOPT;
+       bool needs_rtnl = setsockopt_needs_rtnl(optname);
 
-       if (optval == NULL)
+       if (!optval)
                val = 0;
        else {
                if (optlen >= sizeof(int)) {
@@ -140,6 +160,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
        if (ip6_mroute_opt(optname))
                return ip6_mroute_setsockopt(sk, optname, optval, optlen);
 
+       if (needs_rtnl)
+               rtnl_lock();
        lock_sock(sk);
 
        switch (optname) {
@@ -370,7 +392,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                 */
                if (optlen == 0)
                        optval = NULL;
-               else if (optval == NULL)
+               else if (!optval)
                        goto e_inval;
                else if (optlen < sizeof(struct ipv6_opt_hdr) ||
                         optlen & 0x7 || optlen > 8 * 255)
@@ -421,7 +443,7 @@ sticky_done:
 
                if (optlen == 0)
                        goto e_inval;
-               else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL)
+               else if (optlen < sizeof(struct in6_pktinfo) || !optval)
                        goto e_inval;
 
                if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
@@ -460,7 +482,7 @@ sticky_done:
 
                opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
                retv = -ENOBUFS;
-               if (opt == NULL)
+               if (!opt)
                        break;
 
                memset(opt, 0, sizeof(*opt));
@@ -624,10 +646,10 @@ done:
                psin6 = (struct sockaddr_in6 *)&greq.gr_group;
                if (optname == MCAST_JOIN_GROUP)
                        retv = ipv6_sock_mc_join(sk, greq.gr_interface,
-                               &psin6->sin6_addr);
+                                                &psin6->sin6_addr);
                else
                        retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
-                               &psin6->sin6_addr);
+                                                &psin6->sin6_addr);
                break;
        }
        case MCAST_JOIN_SOURCE_GROUP:
@@ -660,7 +682,7 @@ done:
 
                        psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
                        retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
-                               &psin6->sin6_addr);
+                                                &psin6->sin6_addr);
                        /* prior join w/ different source is ok */
                        if (retv && retv != -EADDRINUSE)
                                break;
@@ -837,11 +859,15 @@ pref_skip_coa:
        }
 
        release_sock(sk);
+       if (needs_rtnl)
+               rtnl_unlock();
 
        return retv;
 
 e_inval:
        release_sock(sk);
+       if (needs_rtnl)
+               rtnl_unlock();
        return -EINVAL;
 }
 
index 5ce107c8aab3477ba20c281c5eb859c572e7347f..fac1f27e428e26257a949b3d3d062ddecdda3e87 100644 (file)
@@ -140,6 +140,8 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        struct net *net = sock_net(sk);
        int err;
 
+       ASSERT_RTNL();
+
        if (!ipv6_addr_is_multicast(addr))
                return -EINVAL;
 
@@ -155,13 +157,12 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
        mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
 
-       if (mc_lst == NULL)
+       if (!mc_lst)
                return -ENOMEM;
 
        mc_lst->next = NULL;
        mc_lst->addr = *addr;
 
-       rtnl_lock();
        if (ifindex == 0) {
                struct rt6_info *rt;
                rt = rt6_lookup(net, addr, NULL, 0, 0);
@@ -172,8 +173,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        } else
                dev = __dev_get_by_index(net, ifindex);
 
-       if (dev == NULL) {
-               rtnl_unlock();
+       if (!dev) {
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                return -ENODEV;
        }
@@ -190,7 +190,6 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        err = ipv6_dev_mc_inc(dev, addr);
 
        if (err) {
-               rtnl_unlock();
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                return err;
        }
@@ -198,10 +197,9 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        mc_lst->next = np->ipv6_mc_list;
        rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
 
-       rtnl_unlock();
-
        return 0;
 }
+EXPORT_SYMBOL(ipv6_sock_mc_join);
 
 /*
  *     socket leave on multicast group
@@ -213,10 +211,11 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
        struct ipv6_mc_socklist __rcu **lnk;
        struct net *net = sock_net(sk);
 
+       ASSERT_RTNL();
+
        if (!ipv6_addr_is_multicast(addr))
                return -EINVAL;
 
-       rtnl_lock();
        for (lnk = &np->ipv6_mc_list;
             (mc_lst = rtnl_dereference(*lnk)) != NULL;
              lnk = &mc_lst->next) {
@@ -227,7 +226,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
                        *lnk = mc_lst->next;
 
                        dev = __dev_get_by_index(net, mc_lst->ifindex);
-                       if (dev != NULL) {
+                       if (dev) {
                                struct inet6_dev *idev = __in6_dev_get(dev);
 
                                (void) ip6_mc_leave_src(sk, mc_lst, idev);
@@ -235,17 +234,16 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
                                        __ipv6_dev_mc_dec(idev, &mc_lst->addr);
                        } else
                                (void) ip6_mc_leave_src(sk, mc_lst, NULL);
-                       rtnl_unlock();
 
                        atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
                        kfree_rcu(mc_lst, rcu);
                        return 0;
                }
        }
-       rtnl_unlock();
 
        return -EADDRNOTAVAIL;
 }
+EXPORT_SYMBOL(ipv6_sock_mc_drop);
 
 /* called with rcu_read_lock() */
 static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
@@ -438,7 +436,7 @@ done:
        read_unlock_bh(&idev->lock);
        rcu_read_unlock();
        if (leavegroup)
-               return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
+               err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
        return err;
 }
 
@@ -825,7 +823,7 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
        struct ifmcaddr6 *mc;
 
        mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
-       if (mc == NULL)
+       if (!mc)
                return NULL;
 
        setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
@@ -862,7 +860,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
        /* we need to take a reference on idev */
        idev = in6_dev_get(dev);
 
-       if (idev == NULL)
+       if (!idev)
                return -EINVAL;
 
        write_lock_bh(&idev->lock);
@@ -1330,7 +1328,7 @@ int igmp6_event_query(struct sk_buff *skb)
                return -EINVAL;
 
        idev = __in6_dev_get(skb->dev);
-       if (idev == NULL)
+       if (!idev)
                return 0;
 
        mld = (struct mld_msg *)icmp6_hdr(skb);
@@ -1445,7 +1443,7 @@ int igmp6_event_report(struct sk_buff *skb)
                return -EINVAL;
 
        idev = __in6_dev_get(skb->dev);
-       if (idev == NULL)
+       if (!idev)
                return -ENODEV;
 
        /*
@@ -1964,7 +1962,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
 
        skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
 
-       if (skb == NULL) {
+       if (!skb) {
                rcu_read_lock();
                IP6_INC_STATS(net, __in6_dev_get(dev),
                              IPSTATS_MIB_OUTDISCARDS);
@@ -2613,7 +2611,7 @@ static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr
 
        im = im->next;
        while (!im) {
-               if (likely(state->idev != NULL))
+               if (likely(state->idev))
                        read_unlock_bh(&state->idev->lock);
 
                state->dev = next_net_device_rcu(state->dev);
@@ -2659,7 +2657,7 @@ static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
 {
        struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
 
-       if (likely(state->idev != NULL)) {
+       if (likely(state->idev)) {
                read_unlock_bh(&state->idev->lock);
                state->idev = NULL;
        }
@@ -2728,10 +2726,10 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
                        continue;
                read_lock_bh(&idev->lock);
                im = idev->mc_list;
-               if (likely(im != NULL)) {
+               if (likely(im)) {
                        spin_lock_bh(&im->mca_lock);
                        psf = im->mca_sources;
-                       if (likely(psf != NULL)) {
+                       if (likely(psf)) {
                                state->im = im;
                                state->idev = idev;
                                break;
@@ -2752,7 +2750,7 @@ static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_s
                spin_unlock_bh(&state->im->mca_lock);
                state->im = state->im->next;
                while (!state->im) {
-                       if (likely(state->idev != NULL))
+                       if (likely(state->idev))
                                read_unlock_bh(&state->idev->lock);
 
                        state->dev = next_net_device_rcu(state->dev);
@@ -2806,11 +2804,11 @@ static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
 {
        struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
-       if (likely(state->im != NULL)) {
+       if (likely(state->im)) {
                spin_unlock_bh(&state->im->mca_lock);
                state->im = NULL;
        }
-       if (likely(state->idev != NULL)) {
+       if (likely(state->idev)) {
                read_unlock_bh(&state->idev->lock);
                state->idev = NULL;
        }
@@ -2907,20 +2905,32 @@ static int __net_init igmp6_net_init(struct net *net)
 
        inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
 
+       err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
+                                  SOCK_RAW, IPPROTO_ICMPV6, net);
+       if (err < 0) {
+               pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
+                      err);
+               goto out_sock_create;
+       }
+
        err = igmp6_proc_init(net);
        if (err)
-               goto out_sock_create;
-out:
-       return err;
+               goto out_sock_create_autojoin;
+
+       return 0;
 
+out_sock_create_autojoin:
+       inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
 out_sock_create:
        inet_ctl_sock_destroy(net->ipv6.igmp_sk);
-       goto out;
+out:
+       return err;
 }
 
 static void __net_exit igmp6_net_exit(struct net *net)
 {
        inet_ctl_sock_destroy(net->ipv6.igmp_sk);
+       inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
        igmp6_proc_exit(net);
 }
 
index 14ecdaf06bf7497dc71199fc5638b49592a24655..71fde6cafb35d6e63da6bd18811a1b4322936892 100644 (file)
@@ -84,6 +84,7 @@ do {                                                          \
 static u32 ndisc_hash(const void *pkey,
                      const struct net_device *dev,
                      __u32 *hash_rnd);
+static bool ndisc_key_eq(const struct neighbour *neigh, const void *pkey);
 static int ndisc_constructor(struct neighbour *neigh);
 static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -117,7 +118,9 @@ static const struct neigh_ops ndisc_direct_ops = {
 struct neigh_table nd_tbl = {
        .family =       AF_INET6,
        .key_len =      sizeof(struct in6_addr),
+       .protocol =     cpu_to_be16(ETH_P_IPV6),
        .hash =         ndisc_hash,
+       .key_eq =       ndisc_key_eq,
        .constructor =  ndisc_constructor,
        .pconstructor = pndisc_constructor,
        .pdestructor =  pndisc_destructor,
@@ -294,6 +297,11 @@ static u32 ndisc_hash(const void *pkey,
        return ndisc_hashfn(pkey, dev, hash_rnd);
 }
 
+static bool ndisc_key_eq(const struct neighbour *n, const void *pkey)
+{
+       return neigh_key_eq128(n, pkey);
+}
+
 static int ndisc_constructor(struct neighbour *neigh)
 {
        struct in6_addr *addr = (struct in6_addr *)&neigh->primary_key;
@@ -303,7 +311,7 @@ static int ndisc_constructor(struct neighbour *neigh)
        bool is_multicast = ipv6_addr_is_multicast(addr);
 
        in6_dev = in6_dev_get(dev);
-       if (in6_dev == NULL) {
+       if (!in6_dev) {
                return -EINVAL;
        }
 
@@ -348,7 +356,7 @@ static int pndisc_constructor(struct pneigh_entry *n)
        struct in6_addr maddr;
        struct net_device *dev = n->dev;
 
-       if (dev == NULL || __in6_dev_get(dev) == NULL)
+       if (!dev || !__in6_dev_get(dev))
                return -EINVAL;
        addrconf_addr_solict_mult(addr, &maddr);
        ipv6_dev_mc_inc(dev, &maddr);
@@ -361,7 +369,7 @@ static void pndisc_destructor(struct pneigh_entry *n)
        struct in6_addr maddr;
        struct net_device *dev = n->dev;
 
-       if (dev == NULL || __in6_dev_get(dev) == NULL)
+       if (!dev || !__in6_dev_get(dev))
                return;
        addrconf_addr_solict_mult(addr, &maddr);
        ipv6_dev_mc_dec(dev, &maddr);
@@ -552,7 +560,7 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
        int optlen = 0;
        struct nd_msg *msg;
 
-       if (saddr == NULL) {
+       if (!saddr) {
                if (ipv6_get_lladdr(dev, &addr_buf,
                                   (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)))
                        return;
@@ -1022,13 +1030,13 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
        size_t msg_size = base_size + nla_total_size(sizeof(struct in6_addr));
 
        skb = nlmsg_new(msg_size, GFP_ATOMIC);
-       if (skb == NULL) {
+       if (!skb) {
                err = -ENOBUFS;
                goto errout;
        }
 
        nlh = nlmsg_put(skb, 0, 0, RTM_NEWNDUSEROPT, base_size, 0);
-       if (nlh == NULL) {
+       if (!nlh) {
                goto nla_put_failure;
        }
 
@@ -1041,8 +1049,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
 
        memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3);
 
-       if (nla_put(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
-                   &ipv6_hdr(ra)->saddr))
+       if (nla_put_in6_addr(skb, NDUSEROPT_SRCADDR, &ipv6_hdr(ra)->saddr))
                goto nla_put_failure;
        nlmsg_end(skb, nlh);
 
@@ -1096,7 +1103,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
         */
 
        in6_dev = __in6_dev_get(skb->dev);
-       if (in6_dev == NULL) {
+       if (!in6_dev) {
                ND_PRINTK(0, err, "RA: can't find inet6 device for %s\n",
                          skb->dev->name);
                return;
@@ -1191,11 +1198,11 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 
        ND_PRINTK(3, info, "RA: rt: %p  lifetime: %d, for dev: %s\n",
                  rt, lifetime, skb->dev->name);
-       if (rt == NULL && lifetime) {
+       if (!rt && lifetime) {
                ND_PRINTK(3, info, "RA: adding default router\n");
 
                rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref);
-               if (rt == NULL) {
+               if (!rt) {
                        ND_PRINTK(0, err,
                                  "RA: %s failed to add default route\n",
                                  __func__);
@@ -1203,7 +1210,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                }
 
                neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
-               if (neigh == NULL) {
+               if (!neigh) {
                        ND_PRINTK(0, err,
                                  "RA: %s got default router without neighbour\n",
                                  __func__);
index 398377a9d0183d297edff70c53dc5cee0a4ab8f9..d958718b50318911d27ece2b7e9f026ca68e8c97 100644 (file)
@@ -84,7 +84,7 @@ static void nf_ip6_saveroute(const struct sk_buff *skb,
 {
        struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
-       if (entry->hook == NF_INET_LOCAL_OUT) {
+       if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);
 
                rt_info->daddr = iph->daddr;
@@ -98,7 +98,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
 {
        struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
-       if (entry->hook == NF_INET_LOCAL_OUT) {
+       if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);
                if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
                    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
index a069822936e6086af56f64d628278440e0279005..ca6998345b4213107d6f08d34740c75820509f84 100644 (file)
@@ -25,14 +25,16 @@ config NF_CONNTRACK_IPV6
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+if NF_TABLES
+
 config NF_TABLES_IPV6
-       depends on NF_TABLES
        tristate "IPv6 nf_tables support"
        help
          This option enables the IPv6 support for nf_tables.
 
+if NF_TABLES_IPV6
+
 config NFT_CHAIN_ROUTE_IPV6
-       depends on NF_TABLES_IPV6
        tristate "IPv6 nf_tables route chain support"
        help
          This option enables the "route" chain for IPv6 in nf_tables. This
@@ -40,16 +42,18 @@ config NFT_CHAIN_ROUTE_IPV6
          fields such as the source, destination, flowlabel, hop-limit and
          the packet mark.
 
-config NF_REJECT_IPV6
-       tristate "IPv6 packet rejection"
-       default m if NETFILTER_ADVANCED=n
-
 config NFT_REJECT_IPV6
-       depends on NF_TABLES_IPV6
        select NF_REJECT_IPV6
        default NFT_REJECT
        tristate
 
+endif # NF_TABLES_IPV6
+endif # NF_TABLES
+
+config NF_REJECT_IPV6
+       tristate "IPv6 packet rejection"
+       default m if NETFILTER_ADVANCED=n
+
 config NF_LOG_IPV6
        tristate "IPv6 packet logging"
        default m if NETFILTER_ADVANCED=n
index bb00c6f2a8855fb72dcc6a1bc5b496e8216d683f..1a732a1d3c8e13c58508cef9381d2d32e5a34448 100644 (file)
@@ -9,7 +9,10 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
 #include <linux/capability.h>
 #include <linux/in.h>
 #include <linux/skbuff.h>
@@ -234,7 +237,7 @@ static struct nf_loginfo trace_loginfo = {
        .type = NF_LOG_TYPE_LOG,
        .u = {
                .log = {
-                       .level = 4,
+                       .level = LOGLEVEL_WARNING,
                        .logflags = NF_LOG_MASK,
                },
        },
@@ -314,8 +317,7 @@ ip6t_next_entry(const struct ip6t_entry *entry)
 unsigned int
 ip6t_do_table(struct sk_buff *skb,
              unsigned int hook,
-             const struct net_device *in,
-             const struct net_device *out,
+             const struct nf_hook_state *state,
              struct xt_table *table)
 {
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -330,8 +332,8 @@ ip6t_do_table(struct sk_buff *skb,
        unsigned int addend;
 
        /* Initialization */
-       indev = in ? in->name : nulldevname;
-       outdev = out ? out->name : nulldevname;
+       indev = state->in ? state->in->name : nulldevname;
+       outdev = state->out ? state->out->name : nulldevname;
        /* We handle fragments by dealing with the first fragment as
         * if it was a normal packet.  All other fragments are treated
         * normally, except that they will NEVER match rules that ask
@@ -339,8 +341,8 @@ ip6t_do_table(struct sk_buff *skb,
         * rule is also a fragment-specific rule, non-fragments won't
         * match it. */
        acpar.hotdrop = false;
-       acpar.in      = in;
-       acpar.out     = out;
+       acpar.in      = state->in;
+       acpar.out     = state->out;
        acpar.family  = NFPROTO_IPV6;
        acpar.hooknum = hook;
 
@@ -390,7 +392,7 @@ ip6t_do_table(struct sk_buff *skb,
 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
                /* The packet is traced: log it */
                if (unlikely(skb->nf_trace))
-                       trace_packet(skb, hook, in, out,
+                       trace_packet(skb, hook, state->in, state->out,
                                     table->name, private, e);
 #endif
                /* Standard target? */
index 544b0a9da1b59db2fc59cbaf11a1eccd1de8dd3f..12331efd49cf865b2e3ce934a734af47f77928a1 100644 (file)
@@ -83,7 +83,8 @@ static int reject_tg6_check(const struct xt_tgchk_param *par)
                return -EINVAL;
        } else if (rejinfo->with == IP6T_TCP_RESET) {
                /* Must specify that it's a TCP packet */
-               if (e->ipv6.proto != IPPROTO_TCP ||
+               if (!(e->ipv6.flags & IP6T_F_PROTO) ||
+                   e->ipv6.proto != IPPROTO_TCP ||
                    (e->ipv6.invflags & XT_INV_PROTO)) {
                        pr_info("TCP_RESET illegal for non-tcp\n");
                        return -EINVAL;
index a0d17270117c37793be3cb61c4d767cd57f70611..6edb7b106de769728357174d0657c644f83e41e8 100644 (file)
@@ -315,11 +315,9 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 
 static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
-                                      const struct net_device *in,
-                                      const struct net_device *out,
-                                      int (*okfn)(struct sk_buff *))
+                                      const struct nf_hook_state *nhs)
 {
-       struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out));
+       struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out));
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        struct nf_conn_synproxy *synproxy;
index ca7f6c1280861b2977dce643fdea349eb3ec5078..5c33d8abc0774e52a99c20273f3349b0b5374fc7 100644 (file)
@@ -33,13 +33,11 @@ static const struct xt_table packet_filter = {
 /* The work comes in here from netfilter.c. */
 static unsigned int
 ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net((in != NULL) ? in : out);
+       const struct net *net = dev_net(state->in ? state->in : state->out);
 
-       return ip6t_do_table(skb, ops->hooknum, in, out,
-                            net->ipv6.ip6table_filter);
+       return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
index 307bbb782d147011d689f04c92e0ba5ac7c13074..b551f5b79fe2b7fa62278ae1f7d9327e82795253 100644 (file)
@@ -32,7 +32,7 @@ static const struct xt_table packet_mangler = {
 };
 
 static unsigned int
-ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
+ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
 {
        unsigned int ret;
        struct in6_addr saddr, daddr;
@@ -57,8 +57,8 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
        /* flowlabel and prio (includes version, which shouldn't change either */
        flowlabel = *((u_int32_t *)ipv6_hdr(skb));
 
-       ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
-                           dev_net(out)->ipv6.ip6table_mangle);
+       ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, state,
+                           dev_net(state->out)->ipv6.ip6table_mangle);
 
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
@@ -77,17 +77,16 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
 /* The work comes in here from netfilter.c. */
 static unsigned int
 ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
        if (ops->hooknum == NF_INET_LOCAL_OUT)
-               return ip6t_mangle_out(skb, out);
+               return ip6t_mangle_out(skb, state);
        if (ops->hooknum == NF_INET_POST_ROUTING)
-               return ip6t_do_table(skb, ops->hooknum, in, out,
-                                    dev_net(out)->ipv6.ip6table_mangle);
+               return ip6t_do_table(skb, ops->hooknum, state,
+                                    dev_net(state->out)->ipv6.ip6table_mangle);
        /* INPUT/FORWARD */
-       return ip6t_do_table(skb, ops->hooknum, in, out,
-                            dev_net(in)->ipv6.ip6table_mangle);
+       return ip6t_do_table(skb, ops->hooknum, state,
+                            dev_net(state->in)->ipv6.ip6table_mangle);
 }
 
 static struct nf_hook_ops *mangle_ops __read_mostly;
index b0634ac996b706a9108b2f1369e4c2be9c1188c4..c3a7f7af0ed4d183d00a5f50307f44fa44399460 100644 (file)
@@ -32,49 +32,40 @@ static const struct xt_table nf_nat_ipv6_table = {
 
 static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
+                                         const struct nf_hook_state *state,
                                          struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
 
-       return ip6t_do_table(skb, ops->hooknum, in, out, net->ipv6.ip6table_nat);
+       return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_nat);
 }
 
 static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_fn(ops, skb, in, out, ip6table_nat_do_chain);
+       return nf_nat_ipv6_fn(ops, skb, state, ip6table_nat_do_chain);
 }
 
 static unsigned int ip6table_nat_in(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_in(ops, skb, in, out, ip6table_nat_do_chain);
+       return nf_nat_ipv6_in(ops, skb, state, ip6table_nat_do_chain);
 }
 
 static unsigned int ip6table_nat_out(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
-                                    const struct net_device *in,
-                                    const struct net_device *out,
-                                    int (*okfn)(struct sk_buff *))
+                                    const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_out(ops, skb, in, out, ip6table_nat_do_chain);
+       return nf_nat_ipv6_out(ops, skb, state, ip6table_nat_do_chain);
 }
 
 static unsigned int ip6table_nat_local_fn(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
-                                         int (*okfn)(struct sk_buff *))
+                                         const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_local_fn(ops, skb, in, out, ip6table_nat_do_chain);
+       return nf_nat_ipv6_local_fn(ops, skb, state, ip6table_nat_do_chain);
 }
 
 static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
index 5274740acecc93b4550dabdd7f48fac3c04f67ac..0b33caad2b69254e29af5ff38484e37a0dc6c711 100644 (file)
@@ -20,13 +20,11 @@ static const struct xt_table packet_raw = {
 /* The work comes in here from netfilter.c. */
 static unsigned int
 ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                 const struct net_device *in, const struct net_device *out,
-                 int (*okfn)(struct sk_buff *))
+                 const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net((in != NULL) ? in : out);
+       const struct net *net = dev_net(state->in ? state->in : state->out);
 
-       return ip6t_do_table(skb, ops->hooknum, in, out,
-                            net->ipv6.ip6table_raw);
+       return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
index ab3b0219ecfa436c07eb5cb86af36bd04efbdfb7..fcef83c25f7b3281a92a2d5be27512e057dddfff 100644 (file)
@@ -37,13 +37,11 @@ static const struct xt_table security_table = {
 
 static unsigned int
 ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                      const struct net_device *in,
-                      const struct net_device *out,
-                      int (*okfn)(struct sk_buff *))
+                      const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net((in != NULL) ? in : out);
+       const struct net *net = dev_net(state->in ? state->in : state->out);
 
-       return ip6t_do_table(skb, ops->hooknum, in, out,
+       return ip6t_do_table(skb, ops->hooknum, state,
                             net->ipv6.ip6table_security);
 }
 
index b68d0e59c1f8bfec0894caff3f53c86a1dca79e9..4ba0c34c627b0e88d3a06fda6532c83a3936315e 100644 (file)
@@ -97,9 +97,7 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
 
 static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
-                               const struct net_device *in,
-                               const struct net_device *out,
-                               int (*okfn)(struct sk_buff *))
+                               const struct nf_hook_state *state)
 {
        struct nf_conn *ct;
        const struct nf_conn_help *help;
@@ -135,9 +133,7 @@ static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
 
 static unsigned int ipv6_confirm(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
-                                const struct net_device *in,
-                                const struct net_device *out,
-                                int (*okfn)(struct sk_buff *))
+                                const struct nf_hook_state *state)
 {
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
@@ -171,25 +167,21 @@ out:
 
 static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
-       return nf_conntrack_in(dev_net(in), PF_INET6, ops->hooknum, skb);
+       return nf_conntrack_in(dev_net(state->in), PF_INET6, ops->hooknum, skb);
 }
 
 static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
-                                        int (*okfn)(struct sk_buff *))
+                                        const struct nf_hook_state *state)
 {
        /* root is playing with raw sockets. */
        if (skb->len < sizeof(struct ipv6hdr)) {
                net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
                return NF_ACCEPT;
        }
-       return nf_conntrack_in(dev_net(out), PF_INET6, ops->hooknum, skb);
+       return nf_conntrack_in(dev_net(state->out), PF_INET6, ops->hooknum, skb);
 }
 
 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
@@ -290,10 +282,8 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
 static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
                                const struct nf_conntrack_tuple *tuple)
 {
-       if (nla_put(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
-                   &tuple->src.u3.ip6) ||
-           nla_put(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
-                   &tuple->dst.u3.ip6))
+       if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
+           nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
                goto nla_put_failure;
        return 0;
 
@@ -312,10 +302,8 @@ static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
        if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST])
                return -EINVAL;
 
-       memcpy(&t->src.u3.ip6, nla_data(tb[CTA_IP_V6_SRC]),
-              sizeof(u_int32_t) * 4);
-       memcpy(&t->dst.u3.ip6, nla_data(tb[CTA_IP_V6_DST]),
-              sizeof(u_int32_t) * 4);
+       t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
+       t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
 
        return 0;
 }
index e70382e4dfb5d05d91d9dad132d04baabd2b48d5..e2b88205675152fd229ca4a4c15f4d7212f14956 100644 (file)
@@ -54,9 +54,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
 
 static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
-                               const struct net_device *in,
-                               const struct net_device *out,
-                               int (*okfn)(struct sk_buff *))
+                               const struct nf_hook_state *state)
 {
        struct sk_buff *reasm;
 
@@ -78,8 +76,8 @@ static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
        nf_ct_frag6_consume_orig(reasm);
 
        NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, reasm,
-                      (struct net_device *) in, (struct net_device *) out,
-                      okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+                      state->in, state->out,
+                      state->okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
 
        return NF_STOLEN;
 }
index ddf07e6f59d7de70bcf41ba84880b2c699296db7..8dd869642f45a032fcd0c1303319313392244a13 100644 (file)
@@ -5,8 +5,10 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/skbuff.h>
@@ -27,7 +29,7 @@ static struct nf_loginfo default_loginfo = {
        .type   = NF_LOG_TYPE_LOG,
        .u = {
                .log = {
-                       .level    = 5,
+                       .level    = LOGLEVEL_NOTICE,
                        .logflags = NF_LOG_MASK,
                },
        },
index c5812e1c1ffbfbd6029ba1dce305ca2d6f691f98..e76900e0aa925a26c226f733f9a44e396ea7cc7f 100644 (file)
@@ -263,11 +263,10 @@ EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
 
 unsigned int
 nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-              const struct net_device *in, const struct net_device *out,
+              const struct nf_hook_state *state,
               unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
+                                       const struct nf_hook_state *state,
                                        struct nf_conn *ct))
 {
        struct nf_conn *ct;
@@ -318,7 +317,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                if (!nf_nat_initialized(ct, maniptype)) {
                        unsigned int ret;
 
-                       ret = do_chain(ops, skb, in, out, ct);
+                       ret = do_chain(ops, skb, state, ct);
                        if (ret != NF_ACCEPT)
                                return ret;
 
@@ -332,7 +331,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                        pr_debug("Already setup manip %s for ct %p\n",
                                 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct);
-                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
                                goto oif_changed;
                }
                break;
@@ -341,7 +340,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
                /* ESTABLISHED */
                NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
                             ctinfo == IP_CT_ESTABLISHED_REPLY);
-               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
                        goto oif_changed;
        }
 
@@ -355,17 +354,16 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_fn);
 
 unsigned int
 nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
-              const struct net_device *in, const struct net_device *out,
+              const struct nf_hook_state *state,
               unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
+                                       const struct nf_hook_state *state,
                                        struct nf_conn *ct))
 {
        unsigned int ret;
        struct in6_addr daddr = ipv6_hdr(skb)->daddr;
 
-       ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
                skb_dst_drop(skb);
@@ -376,11 +374,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_in);
 
 unsigned int
 nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
-               const struct net_device *in, const struct net_device *out,
+               const struct nf_hook_state *state,
                unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
+                                        const struct nf_hook_state *state,
                                         struct nf_conn *ct))
 {
 #ifdef CONFIG_XFRM
@@ -394,7 +391,7 @@ nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
        if (skb->len < sizeof(struct ipv6hdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
 #ifdef CONFIG_XFRM
        if (ret != NF_DROP && ret != NF_STOLEN &&
            !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
@@ -418,11 +415,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_out);
 
 unsigned int
 nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
+                    const struct nf_hook_state *state,
                     unsigned int (*do_chain)(const struct nf_hook_ops *ops,
                                              struct sk_buff *skb,
-                                             const struct net_device *in,
-                                             const struct net_device *out,
+                                             const struct nf_hook_state *state,
                                              struct nf_conn *ct))
 {
        const struct nf_conn *ct;
@@ -434,7 +430,7 @@ nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
        if (skb->len < sizeof(struct ipv6hdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
+       ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
index d05b36440e8be341e3b22cc172acc05962c8a37a..3afdce03d94e7c2dd27d19fabff9250c0f5aaddc 100644 (file)
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get);
 
 struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
                                     const struct sk_buff *oldskb,
-                                    __be16 protocol, int hoplimit)
+                                    __u8 protocol, int hoplimit)
 {
        struct ipv6hdr *ip6h;
        const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
@@ -208,4 +208,39 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
 }
 EXPORT_SYMBOL_GPL(nf_send_reset6);
 
+static bool reject6_csum_ok(struct sk_buff *skb, int hook)
+{
+       const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+       int thoff;
+       __be16 fo;
+       u8 proto;
+
+       if (skb->csum_bad)
+               return false;
+
+       if (skb_csum_unnecessary(skb))
+               return true;
+
+       proto = ip6h->nexthdr;
+       thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
+
+       if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
+               return false;
+
+       return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
+}
+
+void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
+                     unsigned char code, unsigned int hooknum)
+{
+       if (!reject6_csum_ok(skb_in, hooknum))
+               return;
+
+       if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+               skb_in->dev = net->loopback_dev;
+
+       icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+}
+EXPORT_SYMBOL_GPL(nf_send_unreach6);
+
 MODULE_LICENSE("GPL");
index 0d812b31277d9eb04133dbc880e0e151e0f2cf1f..c8148ba76d1a765e1ee2ba190961045fad033c6b 100644 (file)
 
 static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
+                                     const struct nf_hook_state *state)
 {
        struct nft_pktinfo pkt;
 
        /* malformed packet, drop it */
-       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0)
                return NF_DROP;
 
        return nft_do_chain(&pkt, ops);
@@ -33,9 +31,7 @@ static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops,
 
 static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
        if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
                if (net_ratelimit())
@@ -44,7 +40,7 @@ static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
                return NF_ACCEPT;
        }
 
-       return nft_do_chain_ipv6(ops, skb, in, out, okfn);
+       return nft_do_chain_ipv6(ops, skb, state);
 }
 
 struct nft_af_info nft_af_ipv6 __read_mostly = {
index 1c4b75dd425b8e7fe421df37e215534a6eb19584..951bb458b7bd53968f76b6e8431f12214a05b88a 100644 (file)
 
 static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
-                                    const struct net_device *in,
-                                    const struct net_device *out,
+                                    const struct nf_hook_state *state,
                                     struct nf_conn *ct)
 {
        struct nft_pktinfo pkt;
 
-       nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
+       nft_set_pktinfo_ipv6(&pkt, ops, skb, state);
 
        return nft_do_chain(&pkt, ops);
 }
 
 static unsigned int nft_nat_ipv6_fn(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_fn(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv6_fn(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv6_in(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *))
+                                   const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_in(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv6_in(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv6_out(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
-                                    const struct net_device *in,
-                                    const struct net_device *out,
-                                    int (*okfn)(struct sk_buff *))
+                                    const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_out(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv6_out(ops, skb, state, nft_nat_do_chain);
 }
 
 static unsigned int nft_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
-                                         const struct net_device *in,
-                                         const struct net_device *out,
-                                         int (*okfn)(struct sk_buff *))
+                                         const struct nf_hook_state *state)
 {
-       return nf_nat_ipv6_local_fn(ops, skb, in, out, nft_nat_do_chain);
+       return nf_nat_ipv6_local_fn(ops, skb, state, nft_nat_do_chain);
 }
 
 static const struct nf_chain_type nft_chain_nat_ipv6 = {
index 42031299585e1be0452b35a264ee010bb9d7f7fb..0dafdaac5e175062b8c81665834390a8aabe5171 100644 (file)
@@ -24,9 +24,7 @@
 
 static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
        unsigned int ret;
        struct nft_pktinfo pkt;
@@ -35,7 +33,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
        u32 mark, flowlabel;
 
        /* malformed packet, drop it */
-       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0)
                return NF_DROP;
 
        /* save source/dest address, mark, hoplimit, flowlabel, priority */
index 74581f706c4da49edef5947e755d049b125d5a25..4016a6ef9d61479e9c6c418db608588b385314b6 100644 (file)
@@ -9,13 +9,14 @@
 #include <net/addrconf.h>
 #include <net/secure_seq.h>
 
-static u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
-                              struct in6_addr *src)
+static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
+                              struct in6_addr *dst, struct in6_addr *src)
 {
        u32 hash, id;
 
        hash = __ipv6_addr_jhash(dst, hashrnd);
        hash = __ipv6_addr_jhash(src, hash);
+       hash ^= net_hash_mix(net);
 
        /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
         * set the hight order instead thus minimizing possible future
@@ -36,7 +37,7 @@ static u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
  *
  * The network header must be set before calling this.
  */
-void ipv6_proxy_select_ident(struct sk_buff *skb)
+void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
 {
        static u32 ip6_proxy_idents_hashrnd __read_mostly;
        struct in6_addr buf[2];
@@ -53,20 +54,21 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
        net_get_random_once(&ip6_proxy_idents_hashrnd,
                            sizeof(ip6_proxy_idents_hashrnd));
 
-       id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
+       id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
                                 &addrs[1], &addrs[0]);
        skb_shinfo(skb)->ip6_frag_id = htonl(id);
 }
 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
 
-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr,
+                      struct rt6_info *rt)
 {
        static u32 ip6_idents_hashrnd __read_mostly;
        u32 id;
 
        net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
 
-       id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr,
+       id = __ipv6_select_ident(net, ip6_idents_hashrnd, &rt->rt6i_dst.addr,
                                 &rt->rt6i_src.addr);
        fhdr->identification = htonl(id);
 }
index a2dfff6ff227e09607d1d267265e7635d64a2030..263a5164a6f5af1520158293dfb3ab641c223ae2 100644 (file)
@@ -77,8 +77,7 @@ static int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
        return 0;
 }
 
-int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                   size_t len)
+int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
index dae7f1a1e46481d72e5b61da3cc9990d03c36380..79ccdb4c1b336bca0f9ed72dfdd4ea02fbe6a01a 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/skbuff.h>
 #include <linux/compat.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/ioctls.h>
 
 #include <net/net_namespace.h>
@@ -172,7 +172,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
        read_lock(&raw_v6_hashinfo.lock);
        sk = sk_head(&raw_v6_hashinfo.ht[hash]);
 
-       if (sk == NULL)
+       if (!sk)
                goto out;
 
        net = dev_net(skb->dev);
@@ -367,7 +367,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
 
        read_lock(&raw_v6_hashinfo.lock);
        sk = sk_head(&raw_v6_hashinfo.ht[hash]);
-       if (sk != NULL) {
+       if (sk) {
                /* Note: ipv6_hdr(skb) != skb->data */
                const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
                saddr = &ip6h->saddr;
@@ -456,9 +456,8 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
  *     we return it, otherwise we block.
  */
 
-static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
-                 struct msghdr *msg, size_t len,
-                 int noblock, int flags, int *addr_len)
+static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+                        int noblock, int flags, int *addr_len)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
@@ -631,7 +630,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
        skb = sock_alloc_send_skb(sk,
                                  length + hlen + tlen + 15,
                                  flags & MSG_DONTWAIT, &err);
-       if (skb == NULL)
+       if (!skb)
                goto error;
        skb_reserve(skb, hlen);
 
@@ -730,8 +729,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
        return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
 }
 
-static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
-                  struct msghdr *msg, size_t len)
+static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        struct ipv6_txoptions opt_space;
        DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
@@ -791,7 +789,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                        fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
                        if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
                                flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-                               if (flowlabel == NULL)
+                               if (!flowlabel)
                                        return -EINVAL;
                        }
                }
@@ -833,13 +831,13 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                }
                if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-                       if (flowlabel == NULL)
+                       if (!flowlabel)
                                return -EINVAL;
                }
                if (!(opt->opt_nflen|opt->opt_flen))
                        opt = NULL;
        }
-       if (opt == NULL)
+       if (!opt)
                opt = np->opt;
        if (flowlabel)
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
@@ -1132,7 +1130,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
 
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
-               if (skb != NULL)
+               if (skb)
                        amount = skb_tail_pointer(skb) -
                                skb_transport_header(skb);
                spin_unlock_bh(&sk->sk_receive_queue.lock);
index d7d70e69973b7455fbf2b5cb88a09d428f9c45ea..8ffa2c8cce774e8398a031ab90c69a3ed2934a6a 100644 (file)
@@ -430,7 +430,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                int i, plen = 0;
 
                clone = alloc_skb(0, GFP_ATOMIC);
-               if (clone == NULL)
+               if (!clone)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
@@ -552,7 +552,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 
        fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
                     ip6_frag_ecn(hdr));
-       if (fq != NULL) {
+       if (fq) {
                int ret;
 
                spin_lock(&fq->q.lock);
@@ -632,7 +632,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
        table = ip6_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
-               if (table == NULL)
+               if (!table)
                        goto err_alloc;
 
                table[0].data = &net->ipv6.frags.high_thresh;
@@ -648,7 +648,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
        }
 
        hdr = register_net_sysctl(net, "net/ipv6", table);
-       if (hdr == NULL)
+       if (!hdr)
                goto err_reg;
 
        net->ipv6.sysctl.frags_hdr = hdr;
index 4688bd4d7f59587eaf12e91a33bdc81379fd32ea..5c48293ff06235e72f586007ff1e7bb568733b92 100644 (file)
@@ -194,7 +194,6 @@ static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
 
 static struct dst_ops ip6_dst_ops_template = {
        .family                 =       AF_INET6,
-       .protocol               =       cpu_to_be16(ETH_P_IPV6),
        .gc                     =       ip6_dst_gc,
        .gc_thresh              =       1024,
        .check                  =       ip6_dst_check,
@@ -236,7 +235,6 @@ static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
 
 static struct dst_ops ip6_dst_blackhole_ops = {
        .family                 =       AF_INET6,
-       .protocol               =       cpu_to_be16(ETH_P_IPV6),
        .destroy                =       ip6_dst_destroy,
        .check                  =       ip6_dst_check,
        .mtu                    =       ip6_blackhole_mtu,
@@ -1478,7 +1476,7 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
        int remaining;
        u32 *mp;
 
-       if (cfg->fc_mx == NULL)
+       if (!cfg->fc_mx)
                return 0;
 
        mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
@@ -2400,6 +2398,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
        [RTA_PRIORITY]          = { .type = NLA_U32 },
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
+       [RTA_PREF]              = { .type = NLA_U8 },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2407,6 +2406,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 {
        struct rtmsg *rtm;
        struct nlattr *tb[RTA_MAX+1];
+       unsigned int pref;
        int err;
 
        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
@@ -2438,7 +2438,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
        cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
 
        if (tb[RTA_GATEWAY]) {
-               nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
+               cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
                cfg->fc_flags |= RTF_GATEWAY;
        }
 
@@ -2461,7 +2461,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
        }
 
        if (tb[RTA_PREFSRC])
-               nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
+               cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
 
        if (tb[RTA_OIF])
                cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
@@ -2482,6 +2482,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
                cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
        }
 
+       if (tb[RTA_PREF]) {
+               pref = nla_get_u8(tb[RTA_PREF]);
+               if (pref != ICMPV6_ROUTER_PREF_LOW &&
+                   pref != ICMPV6_ROUTER_PREF_HIGH)
+                       pref = ICMPV6_ROUTER_PREF_MEDIUM;
+               cfg->fc_flags |= RTF_PREF(pref);
+       }
+
        err = 0;
 errout:
        return err;
@@ -2511,7 +2519,7 @@ beginning:
 
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        if (nla) {
-                               nla_memcpy(&r_cfg.fc_gateway, nla, 16);
+                               r_cfg.fc_gateway = nla_get_in6_addr(nla);
                                r_cfg.fc_flags |= RTF_GATEWAY;
                        }
                }
@@ -2585,7 +2593,8 @@ static inline size_t rt6_nlmsg_size(void)
               + nla_total_size(4) /* RTA_PRIORITY */
               + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
               + nla_total_size(sizeof(struct rta_cacheinfo))
-              + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
+              + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
+              + nla_total_size(1); /* RTA_PREF */
 }
 
 static int rt6_fill_node(struct net *net,
@@ -2660,19 +2669,19 @@ static int rt6_fill_node(struct net *net,
                rtm->rtm_flags |= RTM_F_CLONED;
 
        if (dst) {
-               if (nla_put(skb, RTA_DST, 16, dst))
+               if (nla_put_in6_addr(skb, RTA_DST, dst))
                        goto nla_put_failure;
                rtm->rtm_dst_len = 128;
        } else if (rtm->rtm_dst_len)
-               if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
+               if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
                        goto nla_put_failure;
 #ifdef CONFIG_IPV6_SUBTREES
        if (src) {
-               if (nla_put(skb, RTA_SRC, 16, src))
+               if (nla_put_in6_addr(skb, RTA_SRC, src))
                        goto nla_put_failure;
                rtm->rtm_src_len = 128;
        } else if (rtm->rtm_src_len &&
-                  nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
+                  nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
                goto nla_put_failure;
 #endif
        if (iif) {
@@ -2696,14 +2705,14 @@ static int rt6_fill_node(struct net *net,
        } else if (dst) {
                struct in6_addr saddr_buf;
                if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
-                   nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+                   nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
                        goto nla_put_failure;
        }
 
        if (rt->rt6i_prefsrc.plen) {
                struct in6_addr saddr_buf;
                saddr_buf = rt->rt6i_prefsrc.addr;
-               if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+               if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
                        goto nla_put_failure;
        }
 
@@ -2711,7 +2720,7 @@ static int rt6_fill_node(struct net *net,
                goto nla_put_failure;
 
        if (rt->rt6i_flags & RTF_GATEWAY) {
-               if (nla_put(skb, RTA_GATEWAY, 16, &rt->rt6i_gateway) < 0)
+               if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
                        goto nla_put_failure;
        }
 
@@ -2726,6 +2735,9 @@ static int rt6_fill_node(struct net *net,
        if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
                goto nla_put_failure;
 
+       if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
+               goto nla_put_failure;
+
        nlmsg_end(skb, nlh);
        return 0;
 
index e4cbd5798eba0c9f393a3ca0857afa69ee666baf..6cf2026a9cea849c067d7388bc23f5d3a20cbeac 100644 (file)
@@ -118,7 +118,7 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
                        return t;
        }
        t = rcu_dereference(sitn->tunnels_wc[0]);
-       if ((t != NULL) && (t->dev->flags & IFF_UP))
+       if (t && (t->dev->flags & IFF_UP))
                return t;
        return NULL;
 }
@@ -251,7 +251,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
 
        dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
                           ipip6_tunnel_setup);
-       if (dev == NULL)
+       if (!dev)
                return NULL;
 
        dev_net_set(dev, net);
@@ -555,7 +555,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
                                skb->dev,
                                iph->daddr,
                                iph->saddr);
-       if (t == NULL)
+       if (!t)
                goto out;
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
@@ -671,7 +671,7 @@ static int ipip6_rcv(struct sk_buff *skb)
 
        tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
                                     iph->saddr, iph->daddr);
-       if (tunnel != NULL) {
+       if (tunnel) {
                struct pcpu_sw_netstats *tstats;
 
                if (tunnel->parms.iph.protocol != IPPROTO_IPV6 &&
@@ -733,7 +733,7 @@ static int ipip_rcv(struct sk_buff *skb)
        iph = ip_hdr(skb);
        tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
                                     iph->saddr, iph->daddr);
-       if (tunnel != NULL) {
+       if (tunnel) {
                if (tunnel->parms.iph.protocol != IPPROTO_IPIP &&
                    tunnel->parms.iph.protocol != 0)
                        goto drop;
@@ -838,7 +838,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                if (skb_dst(skb))
                        neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 
-               if (neigh == NULL) {
+               if (!neigh) {
                        net_dbg_ratelimited("nexthop == NULL\n");
                        goto tx_error;
                }
@@ -867,7 +867,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                if (skb_dst(skb))
                        neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 
-               if (neigh == NULL) {
+               if (!neigh) {
                        net_dbg_ratelimited("nexthop == NULL\n");
                        goto tx_error;
                }
@@ -1076,7 +1076,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
                if (dev->mtu < IPV6_MIN_MTU)
                        dev->mtu = IPV6_MIN_MTU;
        }
-       dev->iflink = tunnel->parms.link;
 }
 
 static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
@@ -1158,7 +1157,7 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                break;
                        }
                        t = ipip6_tunnel_locate(net, &p, 0);
-                       if (t == NULL)
+                       if (!t)
                                t = netdev_priv(dev);
                }
 
@@ -1206,7 +1205,7 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
 
                if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
-                       if (t != NULL) {
+                       if (t) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
@@ -1242,7 +1241,7 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                goto done;
                        err = -ENOENT;
                        t = ipip6_tunnel_locate(net, &p, 0);
-                       if (t == NULL)
+                       if (!t)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(sitn->fb_tunnel_dev))
@@ -1336,6 +1335,7 @@ static const struct net_device_ops ipip6_netdev_ops = {
        .ndo_do_ioctl   = ipip6_tunnel_ioctl,
        .ndo_change_mtu = ipip6_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
+       .ndo_get_iflink = ip_tunnel_get_iflink,
 };
 
 static void ipip6_dev_free(struct net_device *dev)
@@ -1366,7 +1366,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
        dev->mtu                = ETH_DATA_LEN - t_hlen;
        dev->flags              = IFF_NOARP;
        netif_keep_dst(dev);
-       dev->iflink             = 0;
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_LLTX;
        dev->features           |= SIT_FEATURES;
@@ -1530,8 +1529,7 @@ static bool ipip6_netlink_6rd_parms(struct nlattr *data[],
 
        if (data[IFLA_IPTUN_6RD_PREFIX]) {
                ret = true;
-               nla_memcpy(&ip6rd->prefix, data[IFLA_IPTUN_6RD_PREFIX],
-                          sizeof(struct in6_addr));
+               ip6rd->prefix = nla_get_in6_addr(data[IFLA_IPTUN_6RD_PREFIX]);
        }
 
        if (data[IFLA_IPTUN_6RD_RELAY_PREFIX]) {
@@ -1683,8 +1681,8 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct ip_tunnel_parm *parm = &tunnel->parms;
 
        if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
-           nla_put_be32(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
-           nla_put_be32(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
+           nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
+           nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
            nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
            nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
            nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
@@ -1694,10 +1692,10 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
                goto nla_put_failure;
 
 #ifdef CONFIG_IPV6_SIT_6RD
-       if (nla_put(skb, IFLA_IPTUN_6RD_PREFIX, sizeof(struct in6_addr),
-                   &tunnel->ip6rd.prefix) ||
-           nla_put_be32(skb, IFLA_IPTUN_6RD_RELAY_PREFIX,
-                        tunnel->ip6rd.relay_prefix) ||
+       if (nla_put_in6_addr(skb, IFLA_IPTUN_6RD_PREFIX,
+                            &tunnel->ip6rd.prefix) ||
+           nla_put_in_addr(skb, IFLA_IPTUN_6RD_RELAY_PREFIX,
+                           tunnel->ip6rd.relay_prefix) ||
            nla_put_u16(skb, IFLA_IPTUN_6RD_PREFIXLEN,
                        tunnel->ip6rd.prefixlen) ||
            nla_put_u16(skb, IFLA_IPTUN_6RD_RELAY_PREFIXLEN,
@@ -1795,7 +1793,7 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
                        struct ip_tunnel *t;
 
                        t = rtnl_dereference(sitn->tunnels[prio][h]);
-                       while (t != NULL) {
+                       while (t) {
                                /* If dev is in the same netns, it has already
                                 * been added to the list by the previous loop.
                                 */
index 7337fc7947e2eba2c5e6eaccbc9cfd660d3a0ccd..21bc2eb53c57bce6dd0a1073bc77766eabf9ac5f 100644 (file)
@@ -49,11 +49,12 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
        struct sock *child;
 
        child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
-       if (child)
+       if (child) {
+               atomic_set(&req->rsk_refcnt, 1);
                inet_csk_reqsk_queue_add(sk, req, child);
-       else
+       } else {
                reqsk_free(req);
-
+       }
        return child;
 }
 
@@ -189,13 +190,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                goto out;
 
        ret = NULL;
-       req = inet_reqsk_alloc(&tcp6_request_sock_ops);
+       req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk);
        if (!req)
                goto out;
 
        ireq = inet_rsk(req);
        treq = tcp_rsk(req);
-       treq->listener = NULL;
+       treq->tfo_listener = false;
 
        if (security_inet_conn_request(sk, skb, req))
                goto out_free;
@@ -220,7 +221,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 
        ireq->ir_mark = inet_request_mark(sk, skb);
 
-       req->expires = 0UL;
        req->num_retrans = 0;
        ireq->snd_wscale        = tcp_opt.snd_wscale;
        ireq->sack_ok           = tcp_opt.sack_ok;
index c5c10fafcfe2e068fe33adc8367206a16e40c7bf..abcc79f649b34750ee5f25051d5197dd64d91856 100644 (file)
@@ -54,6 +54,20 @@ static struct ctl_table ipv6_table_template[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "idgen_retries",
+               .data           = &init_net.ipv6.sysctl.idgen_retries,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "idgen_delay",
+               .data           = &init_net.ipv6.sysctl.idgen_delay,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
        { }
 };
 
@@ -93,6 +107,8 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
        ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
        ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
        ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;
+       ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
+       ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
 
        ipv6_route_table = ipv6_route_sysctl_init(net);
        if (!ipv6_route_table)
@@ -163,7 +179,7 @@ int ipv6_sysctl_register(void)
        int err = -ENOMEM;
 
        ip6_header = register_net_sysctl(&init_net, "net/ipv6", ipv6_rotable);
-       if (ip6_header == NULL)
+       if (!ip6_header)
                goto out;
 
        err = register_pernet_subsys(&ipv6_sysctl_net_ops);
index 1f5e62229aaa8b4d5822f82c3af3a6c8b1382ba6..f73a97f6e68ec8286972fadcf9328e29af123242 100644 (file)
@@ -104,19 +104,6 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
        }
 }
 
-static void tcp_v6_hash(struct sock *sk)
-{
-       if (sk->sk_state != TCP_CLOSE) {
-               if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
-                       tcp_prot.hash(sk);
-                       return;
-               }
-               local_bh_disable();
-               __inet6_hash(sk, NULL);
-               local_bh_enable();
-       }
-}
-
 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 {
        return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
@@ -154,7 +141,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-                       if (flowlabel == NULL)
+                       if (!flowlabel)
                                return -EINVAL;
                        fl6_sock_release(flowlabel);
                }
@@ -233,11 +220,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        tp->af_specific = &tcp_sock_ipv6_specific;
 #endif
                        goto failure;
-               } else {
-                       ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
-                       ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
-                                              &sk->sk_v6_rcv_saddr);
                }
+               np->saddr = sk->sk_v6_rcv_saddr;
 
                return err;
        }
@@ -263,7 +247,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                goto failure;
        }
 
-       if (saddr == NULL) {
+       if (!saddr) {
                saddr = &fl6.saddr;
                sk->sk_v6_rcv_saddr = *saddr;
        }
@@ -340,18 +324,20 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 {
        const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
        const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
+       struct net *net = dev_net(skb->dev);
+       struct request_sock *fastopen;
        struct ipv6_pinfo *np;
-       struct sock *sk;
-       int err;
        struct tcp_sock *tp;
-       struct request_sock *fastopen;
        __u32 seq, snd_una;
-       struct net *net = dev_net(skb->dev);
+       struct sock *sk;
+       int err;
 
-       sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
-                       th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
+       sk = __inet6_lookup_established(net, &tcp_hashinfo,
+                                       &hdr->daddr, th->dest,
+                                       &hdr->saddr, ntohs(th->source),
+                                       skb->dev->ifindex);
 
-       if (sk == NULL) {
+       if (!sk) {
                ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
                                   ICMP6_MIB_INERRORS);
                return;
@@ -361,6 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                inet_twsk_put(inet_twsk(sk));
                return;
        }
+       seq = ntohl(th->seq);
+       if (sk->sk_state == TCP_NEW_SYN_RECV)
+               return tcp_req_err(sk, seq);
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -375,7 +364,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        }
 
        tp = tcp_sk(sk);
-       seq = ntohl(th->seq);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
        fastopen = tp->fastopen_rsk;
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
@@ -419,37 +407,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        /* Might be for an request_sock */
        switch (sk->sk_state) {
-               struct request_sock *req, **prev;
-       case TCP_LISTEN:
-               if (sock_owned_by_user(sk))
-                       goto out;
-
-               /* Note : We use inet6_iif() here, not tcp_v6_iif() */
-               req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
-                                          &hdr->saddr, inet6_iif(skb));
-               if (!req)
-                       goto out;
-
-               /* ICMPs are not backlogged, hence we cannot get
-                * an established socket here.
-                */
-               WARN_ON(req->sk != NULL);
-
-               if (seq != tcp_rsk(req)->snt_isn) {
-                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-                       goto out;
-               }
-
-               inet_csk_reqsk_queue_drop(sk, req, prev);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
-               goto out;
-
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket is
                 * is already accepted it is treated as a connected one below.
                 */
-               if (fastopen && fastopen->sk == NULL)
+               if (fastopen && !fastopen->sk)
                        break;
 
                if (!sock_owned_by_user(sk)) {
@@ -497,7 +460,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
                                    &ireq->ir_v6_rmt_addr);
 
                fl6->daddr = ireq->ir_v6_rmt_addr;
-               if (np->repflow && (ireq->pktopts != NULL))
+               if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
                skb_set_queue_mapping(skb, queue_mapping);
@@ -523,17 +486,11 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 }
 
 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
-                                               struct sock *addr_sk)
+                                               const struct sock *addr_sk)
 {
        return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 }
 
-static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
-                                                     struct request_sock *req)
-{
-       return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
-}
-
 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
                                 int optlen)
 {
@@ -619,9 +576,9 @@ clear_hash_noput:
        return 1;
 }
 
-static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+static int tcp_v6_md5_hash_skb(char *md5_hash,
+                              const struct tcp_md5sig_key *key,
                               const struct sock *sk,
-                              const struct request_sock *req,
                               const struct sk_buff *skb)
 {
        const struct in6_addr *saddr, *daddr;
@@ -629,12 +586,9 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
        struct hash_desc *desc;
        const struct tcphdr *th = tcp_hdr(skb);
 
-       if (sk) {
-               saddr = &inet6_sk(sk)->saddr;
+       if (sk) { /* valid for establish/request sockets */
+               saddr = &sk->sk_v6_rcv_saddr;
                daddr = &sk->sk_v6_daddr;
-       } else if (req) {
-               saddr = &inet_rsk(req)->ir_v6_loc_addr;
-               daddr = &inet_rsk(req)->ir_v6_rmt_addr;
        } else {
                const struct ipv6hdr *ip6h = ipv6_hdr(skb);
                saddr = &ip6h->saddr;
@@ -670,8 +624,7 @@ clear_hash_noput:
        return 1;
 }
 
-static int __tcp_v6_inbound_md5_hash(struct sock *sk,
-                                    const struct sk_buff *skb)
+static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
        const __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
@@ -685,44 +638,32 @@ static int __tcp_v6_inbound_md5_hash(struct sock *sk,
 
        /* We've parsed the options - do we have a hash? */
        if (!hash_expected && !hash_location)
-               return 0;
+               return false;
 
        if (hash_expected && !hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
-               return 1;
+               return true;
        }
 
        if (!hash_expected && hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
-               return 1;
+               return true;
        }
 
        /* check the signature */
        genhash = tcp_v6_md5_hash_skb(newhash,
                                      hash_expected,
-                                     NULL, NULL, skb);
+                                     NULL, skb);
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
                                     genhash ? "failed" : "mismatch",
                                     &ip6h->saddr, ntohs(th->source),
                                     &ip6h->daddr, ntohs(th->dest));
-               return 1;
+               return true;
        }
-       return 0;
-}
-
-static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
-{
-       int ret;
-
-       rcu_read_lock();
-       ret = __tcp_v6_inbound_md5_hash(sk, skb);
-       rcu_read_unlock();
-
-       return ret;
+       return false;
 }
-
 #endif
 
 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
@@ -734,8 +675,6 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
        ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
        ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 
-       ireq->ir_iif = sk->sk_bound_dev_if;
-
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
            ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
@@ -774,7 +713,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
        .mss_clamp      =       IPV6_MIN_MTU - sizeof(struct tcphdr) -
                                sizeof(struct ipv6hdr),
 #ifdef CONFIG_TCP_MD5SIG
-       .md5_lookup     =       tcp_v6_reqsk_md5_lookup,
+       .req_md5_lookup =       tcp_v6_md5_lookup,
        .calc_md5_hash  =       tcp_v6_md5_hash_skb,
 #endif
        .init_req       =       tcp_v6_init_req,
@@ -811,7 +750,7 @@ static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
 
        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
                         GFP_ATOMIC);
-       if (buff == NULL)
+       if (!buff)
                return;
 
        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
@@ -931,7 +870,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
                if (!key)
                        goto release_sk1;
 
-               genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
+               genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto release_sk1;
        } else {
@@ -997,17 +936,19 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 
 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-       struct request_sock *req, **prev;
        const struct tcphdr *th = tcp_hdr(skb);
+       struct request_sock *req;
        struct sock *nsk;
 
        /* Find possible connection requests. */
-       req = inet6_csk_search_req(sk, &prev, th->source,
+       req = inet6_csk_search_req(sk, th->source,
                                   &ipv6_hdr(skb)->saddr,
                                   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
-       if (req)
-               return tcp_check_req(sk, skb, req, prev, false);
-
+       if (req) {
+               nsk = tcp_check_req(sk, skb, req, false);
+               reqsk_put(req);
+               return nsk;
+       }
        nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
                                         &ipv6_hdr(skb)->saddr, th->source,
                                         &ipv6_hdr(skb)->daddr, ntohs(th->dest),
@@ -1067,7 +1008,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
                newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
 
-               if (newsk == NULL)
+               if (!newsk)
                        return NULL;
 
                newtcp6sk = (struct tcp6_sock *)newsk;
@@ -1079,11 +1020,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
                memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-               ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
-
-               ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
-
-               newsk->sk_v6_rcv_saddr = newnp->saddr;
+               newnp->saddr = newsk->sk_v6_rcv_saddr;
 
                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1128,7 +1065,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        }
 
        newsk = tcp_create_openreq_child(sk, req, skb);
-       if (newsk == NULL)
+       if (!newsk)
                goto out_nonewsk;
 
        /*
@@ -1170,7 +1107,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
-       if (ireq->pktopts != NULL) {
+       if (ireq->pktopts) {
                newnp->pktoptions = skb_clone(ireq->pktopts,
                                              sk_gfp_atomic(sk, GFP_ATOMIC));
                consume_skb(ireq->pktopts);
@@ -1215,7 +1152,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 #ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
        key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
-       if (key != NULL) {
+       if (key) {
                /* We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
@@ -1232,7 +1169,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                tcp_done(newsk);
                goto out;
        }
-       __inet6_hash(newsk, NULL);
+       __inet_hash(newsk, NULL);
 
        return newsk;
 
@@ -1547,7 +1484,7 @@ do_time_wait:
                                            &ipv6_hdr(skb)->saddr, th->source,
                                            &ipv6_hdr(skb)->daddr,
                                            ntohs(th->dest), tcp_v6_iif(skb));
-               if (sk2 != NULL) {
+               if (sk2) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
                        inet_twsk_deschedule(tw, &tcp_death_row);
                        inet_twsk_put(tw);
@@ -1595,7 +1532,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
        if (sk) {
                skb->sk = sk;
                skb->destructor = sock_edemux;
-               if (sk->sk_state != TCP_TIME_WAIT) {
+               if (sk_fullsock(sk)) {
                        struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
 
                        if (dst)
@@ -1700,9 +1637,9 @@ static void tcp_v6_destroy_sock(struct sock *sk)
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCPv6 sock list dumping. */
 static void get_openreq6(struct seq_file *seq,
-                        const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
+                        struct request_sock *req, int i, kuid_t uid)
 {
-       int ttd = req->expires - jiffies;
+       long ttd = req->rsk_timer.expires - jiffies;
        const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
        const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
 
@@ -1838,7 +1775,7 @@ static int tcp6_seq_show(struct seq_file *seq, void *v)
                        get_tcp6_sock(seq, v, st->num);
                break;
        case TCP_SEQ_STATE_OPENREQ:
-               get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
+               get_openreq6(seq, v, st->num, st->uid);
                break;
        }
 out:
@@ -1902,7 +1839,7 @@ struct proto tcpv6_prot = {
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v6_do_rcv,
        .release_cb             = tcp_release_cb,
-       .hash                   = tcp_v6_hash,
+       .hash                   = inet_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
index c1ab77105b4c3f8025beea58d06c3607eda44656..d883c9204c01d525fcd51012c34afbf5f8af11ed 100644 (file)
@@ -41,8 +41,8 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
        return tcp_gro_complete(skb);
 }
 
-struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
-                                netdev_features_t features)
+static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
+                                       netdev_features_t features)
 {
        struct tcphdr *th;
 
index d048d46779fc55407e9c45d123d6ba7dd6858782..120aff9aa010f3e3b4d5b04b290324f16eecc5d9 100644 (file)
 #include <trace/events/skb.h>
 #include "udp_impl.h"
 
-static unsigned int udp6_ehashfn(struct net *net,
-                                 const struct in6_addr *laddr,
-                                 const u16 lport,
-                                 const struct in6_addr *faddr,
-                                 const __be16 fport)
+static u32 udp6_ehashfn(const struct net *net,
+                       const struct in6_addr *laddr,
+                       const u16 lport,
+                       const struct in6_addr *faddr,
+                       const __be16 fport)
 {
        static u32 udp6_ehash_secret __read_mostly;
        static u32 udp_ipv6_hash_secret __read_mostly;
@@ -104,9 +104,9 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
        return 0;
 }
 
-static unsigned int udp6_portaddr_hash(struct net *net,
-                                      const struct in6_addr *addr6,
-                                      unsigned int port)
+static u32 udp6_portaddr_hash(const struct net *net,
+                             const struct in6_addr *addr6,
+                             unsigned int port)
 {
        unsigned int hash, mix = net_hash_mix(net);
 
@@ -391,8 +391,7 @@ EXPORT_SYMBOL_GPL(udp6_lib_lookup);
  *     return it, otherwise we block.
  */
 
-int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
-                 struct msghdr *msg, size_t len,
+int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                  int noblock, int flags, int *addr_len)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
@@ -551,7 +550,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        sk = __udp6_lib_lookup(net, daddr, uh->dest,
                               saddr, uh->source, inet6_iif(skb), udptable);
-       if (sk == NULL) {
+       if (!sk) {
                ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
                                   ICMP6_MIB_INERRORS);
                return;
@@ -649,7 +648,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
                /* if we're overly short, let UDP handle it */
                encap_rcv = ACCESS_ONCE(up->encap_rcv);
-               if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
+               if (skb->len > sizeof(struct udphdr) && encap_rcv) {
                        int ret;
 
                        /* Verify checksum before giving to encap */
@@ -750,7 +749,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
        for (i = 0; i < count; i++) {
                sk = stack[i];
-               if (likely(skb1 == NULL))
+               if (likely(!skb1))
                        skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
                if (!skb1) {
                        atomic_inc(&sk->sk_drops);
@@ -900,7 +899,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
         * for sock caches... i'll skip this for now.
         */
        sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-       if (sk != NULL) {
+       if (sk) {
                int ret;
 
                if (!uh->check && !udp_sk(sk)->no_check6_rx) {
@@ -1101,8 +1100,7 @@ out:
        return err;
 }
 
-int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
-                 struct msghdr *msg, size_t len)
+int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        struct ipv6_txoptions opt_space;
        struct udp_sock *up = udp_sk(sk);
@@ -1164,12 +1162,12 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 do_udp_sendmsg:
                        if (__ipv6_only_sock(sk))
                                return -ENETUNREACH;
-                       return udp_sendmsg(iocb, sk, msg, len);
+                       return udp_sendmsg(sk, msg, len);
                }
        }
 
        if (up->pending == AF_INET)
-               return udp_sendmsg(iocb, sk, msg, len);
+               return udp_sendmsg(sk, msg, len);
 
        /* Rough check on arithmetic overflow,
           better check is made in ip6_append_data().
@@ -1209,7 +1207,7 @@ do_udp_sendmsg:
                        fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
                        if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
                                flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-                               if (flowlabel == NULL)
+                               if (!flowlabel)
                                        return -EINVAL;
                        }
                }
@@ -1257,14 +1255,14 @@ do_udp_sendmsg:
                }
                if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-                       if (flowlabel == NULL)
+                       if (!flowlabel)
                                return -EINVAL;
                }
                if (!(opt->opt_nflen|opt->opt_flen))
                        opt = NULL;
                connected = 0;
        }
-       if (opt == NULL)
+       if (!opt)
                opt = np->opt;
        if (flowlabel)
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
index c779c3c90b9d3b90c3b4821508d3fdf31b6d5da6..0682c031ccdc77da801c46851d7ad2163921fe19 100644 (file)
@@ -23,10 +23,9 @@ int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
 int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, int __user *optlen);
 #endif
-int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                 size_t len);
-int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                 size_t len, int noblock, int flags, int *addr_len);
+int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
+int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+                 int flags, int *addr_len);
 int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 void udpv6_destroy_sock(struct sock *sk);
 
index be2c0ba82c8525ca466468ae45b05df5b35f85b6..7441e1e6389381a9ae65df5c433433d5cf63a3f2 100644 (file)
@@ -54,7 +54,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 
                /* Set the IPv6 fragment id if not set yet */
                if (!skb_shinfo(skb)->ip6_frag_id)
-                       ipv6_proxy_select_ident(skb);
+                       ipv6_proxy_select_ident(dev_net(skb->dev), skb);
 
                segs = NULL;
                goto out;
@@ -113,7 +113,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                fptr->nexthdr = nexthdr;
                fptr->reserved = 0;
                if (!skb_shinfo(skb)->ip6_frag_id)
-                       ipv6_proxy_select_ident(skb);
+                       ipv6_proxy_select_ident(dev_net(skb->dev), skb);
                fptr->identification = skb_shinfo(skb)->ip6_frag_id;
 
                /* Fragment the skb. ipv6 header and the remaining fields of the
index 9949a356d62c8f5e1808e3de0957b28a814b5084..1e205c3253acbcdde34ccf2c2dbce3fb63b10967 100644 (file)
@@ -95,8 +95,8 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 
        ip6h = ipv6_hdr(skb);
        ip6h->payload_len = htons(skb->len - size);
-       ip6h->daddr = *(struct in6_addr *)&x->sel.daddr.a6;
-       ip6h->saddr = *(struct in6_addr *)&x->sel.saddr.a6;
+       ip6h->daddr = x->sel.daddr.in6;
+       ip6h->saddr = x->sel.saddr.in6;
        err = 0;
 out:
        return err;
index 8d2d01b4800a197eaaa64fb184f56c58920ad462..f337a908a76a1145c8b878c224f961d087769785 100644 (file)
@@ -61,9 +61,7 @@ static int xfrm6_get_saddr(struct net *net,
                return -EHOSTUNREACH;
 
        dev = ip6_dst_idev(dst)->dev;
-       ipv6_dev_get_saddr(dev_net(dev), dev,
-                          (struct in6_addr *)&daddr->a6, 0,
-                          (struct in6_addr *)&saddr->a6);
+       ipv6_dev_get_saddr(dev_net(dev), dev, &daddr->in6, 0, &saddr->in6);
        dst_release(dst);
        return 0;
 }
@@ -293,7 +291,6 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 
 static struct dst_ops xfrm6_dst_ops = {
        .family =               AF_INET6,
-       .protocol =             cpu_to_be16(ETH_P_IPV6),
        .gc =                   xfrm6_garbage_collect,
        .update_pmtu =          xfrm6_update_pmtu,
        .redirect =             xfrm6_redirect,
@@ -371,7 +368,7 @@ static void __net_exit xfrm6_net_exit(struct net *net)
 {
        struct ctl_table *table;
 
-       if (net->ipv6.sysctl.xfrm6_hdr == NULL)
+       if (!net->ipv6.sysctl.xfrm6_hdr)
                return;
 
        table = net->ipv6.sysctl.xfrm6_hdr->ctl_table_arg;
index f11ad1d95e0e6e88294003fee338743863daa443..4ea5d7497b5f29ac41a00783c48d1898aa769937 100644 (file)
@@ -1688,8 +1688,7 @@ out:
        return rc;
 }
 
-static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
-       struct msghdr *msg, size_t len)
+static int ipx_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct ipx_sock *ipxs = ipx_sk(sk);
@@ -1754,8 +1753,8 @@ out:
 }
 
 
-static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
-               struct msghdr *msg, size_t size, int flags)
+static int ipx_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                      int flags)
 {
        struct sock *sk = sock->sk;
        struct ipx_sock *ipxs = ipx_sk(sk);
index 568edc72d7371f2b01dffa9af9cc114fad5d7967..ee0ea25c8e7aa9ddd182c0bb284c75a5c0a198bf 100644 (file)
@@ -1256,14 +1256,13 @@ static int irda_release(struct socket *sock)
 }
 
 /*
- * Function irda_sendmsg (iocb, sock, msg, len)
+ * Function irda_sendmsg (sock, msg, len)
  *
  *    Send message down to TinyTP. This function is used for both STREAM and
  *    SEQPACK services. This is possible since it forces the client to
  *    fragment the message if necessary
  */
-static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *msg, size_t len)
+static int irda_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct irda_sock *self;
@@ -1348,13 +1347,13 @@ out:
 }
 
 /*
- * Function irda_recvmsg_dgram (iocb, sock, msg, size, flags)
+ * Function irda_recvmsg_dgram (sock, msg, size, flags)
  *
  *    Try to receive message and copy it to user. The frame is discarded
  *    after being read, regardless of how much the user actually read
  */
-static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
-                             struct msghdr *msg, size_t size, int flags)
+static int irda_recvmsg_dgram(struct socket *sock, struct msghdr *msg,
+                             size_t size, int flags)
 {
        struct sock *sk = sock->sk;
        struct irda_sock *self = irda_sk(sk);
@@ -1398,10 +1397,10 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
 }
 
 /*
- * Function irda_recvmsg_stream (iocb, sock, msg, size, flags)
+ * Function irda_recvmsg_stream (sock, msg, size, flags)
  */
-static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
-                              struct msghdr *msg, size_t size, int flags)
+static int irda_recvmsg_stream(struct socket *sock, struct msghdr *msg,
+                              size_t size, int flags)
 {
        struct sock *sk = sock->sk;
        struct irda_sock *self = irda_sk(sk);
@@ -1515,14 +1514,14 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
 }
 
 /*
- * Function irda_sendmsg_dgram (iocb, sock, msg, len)
+ * Function irda_sendmsg_dgram (sock, msg, len)
  *
  *    Send message down to TinyTP for the unreliable sequenced
  *    packet service...
  *
  */
-static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
-                             struct msghdr *msg, size_t len)
+static int irda_sendmsg_dgram(struct socket *sock, struct msghdr *msg,
+                             size_t len)
 {
        struct sock *sk = sock->sk;
        struct irda_sock *self;
@@ -1594,14 +1593,14 @@ out:
 }
 
 /*
- * Function irda_sendmsg_ultra (iocb, sock, msg, len)
+ * Function irda_sendmsg_ultra (sock, msg, len)
  *
  *    Send message down to IrLMP for the unreliable Ultra
  *    packet service...
  */
 #ifdef CONFIG_IRDA_ULTRA
-static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
-                             struct msghdr *msg, size_t len)
+static int irda_sendmsg_ultra(struct socket *sock, struct msghdr *msg,
+                             size_t len)
 {
        struct sock *sk = sock->sk;
        struct irda_sock *self;
index 53d931172088b15b2890c42dd649308771b6e7d3..6daa52a18d40ca2a40f702acefccd01e6b145f36 100644 (file)
@@ -1026,8 +1026,8 @@ static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
                                 (void *) prmdata, 8);
 }
 
-static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-                            struct msghdr *msg, size_t len)
+static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+                            size_t len)
 {
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
@@ -1315,8 +1315,8 @@ static void iucv_process_message_q(struct sock *sk)
        }
 }
 
-static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-                            struct msghdr *msg, size_t len, int flags)
+static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+                            size_t len, int flags)
 {
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
index f8ac939d52b4b83ce3720e274f33461347029661..f0d52d721b3a4405b47f38d8f5d6c1990cbf8b5f 100644 (file)
@@ -709,7 +709,7 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port
                sin6->sin6_family = AF_INET6;
                sin6->sin6_port = port;
                sin6->sin6_flowinfo = 0;
-               sin6->sin6_addr = *(struct in6_addr *)xaddr->a6;
+               sin6->sin6_addr = xaddr->in6;
                sin6->sin6_scope_id = 0;
                return 128;
            }
@@ -3588,8 +3588,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
 }
 #endif
 
-static int pfkey_sendmsg(struct kiocb *kiocb,
-                        struct socket *sock, struct msghdr *msg, size_t len)
+static int pfkey_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct sk_buff *skb = NULL;
@@ -3630,8 +3629,7 @@ out:
        return err ? : len;
 }
 
-static int pfkey_recvmsg(struct kiocb *kiocb,
-                        struct socket *sock, struct msghdr *msg, size_t len,
+static int pfkey_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                         int flags)
 {
        struct sock *sk = sock->sk;
index 895348e44c7d22c9e6d4828195e7099a74154531..a29a504492af6f2c38607f2c15e123a297d565cd 100644 (file)
@@ -1871,6 +1871,7 @@ static int __init l2tp_init(void)
        l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
        if (!l2tp_wq) {
                pr_err("alloc_workqueue failed\n");
+               unregister_pernet_device(&l2tp_net_ops);
                rc = -ENOMEM;
                goto out;
        }
index 781b3a226ba73204aa9ff68923ae2385a88e7205..4b552873b55603a648f37bbd497efbb43bd869a4 100644 (file)
@@ -74,7 +74,7 @@ static int l2tp_eth_dev_init(struct net_device *dev)
 
        priv->dev = dev;
        eth_hw_addr_random(dev);
-       memset(&dev->broadcast[0], 0xff, 6);
+       eth_broadcast_addr(dev->broadcast);
        dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock;
        return 0;
 }
index 05dfc8aa36afc83b61e2cefeb76738ded8a4db2d..79649937ec71da6ffc70584b51ccbba6b73e391f 100644 (file)
@@ -385,7 +385,7 @@ drop:
 /* Userspace will call sendmsg() on the tunnel socket to send L2TP
  * control frames.
  */
-static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
+static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        struct sk_buff *skb;
        int rc;
@@ -506,7 +506,7 @@ no_route:
        goto out;
 }
 
-static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
                           size_t len, int noblock, int flags, int *addr_len)
 {
        struct inet_sock *inet = inet_sk(sk);
index 8611f1b6314161d4df90c4969b3b9cb9066a446d..d1ded3777815e5b997db37cf4975422f4708dec1 100644 (file)
@@ -480,8 +480,7 @@ out:
 /* Userspace will call sendmsg() on the tunnel socket to send L2TP
  * control frames.
  */
-static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
-                           struct msghdr *msg, size_t len)
+static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        struct ipv6_txoptions opt_space;
        DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
@@ -643,9 +642,8 @@ do_confirm:
        goto done;
 }
 
-static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
-                           struct msghdr *msg, size_t len, int noblock,
-                           int flags, int *addr_len)
+static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+                           int noblock, int flags, int *addr_len)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
index b4e923f7795460736f48c73e9942efad46b8a285..9e13c2ff878970fbbe355990f756684d9e868988 100644 (file)
@@ -205,9 +205,9 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
 #endif
                if (info->attrs[L2TP_ATTR_IP_SADDR] &&
                    info->attrs[L2TP_ATTR_IP_DADDR]) {
-                       cfg.local_ip.s_addr = nla_get_be32(
+                       cfg.local_ip.s_addr = nla_get_in_addr(
                                info->attrs[L2TP_ATTR_IP_SADDR]);
-                       cfg.peer_ip.s_addr = nla_get_be32(
+                       cfg.peer_ip.s_addr = nla_get_in_addr(
                                info->attrs[L2TP_ATTR_IP_DADDR]);
                } else {
                        ret = -EINVAL;
@@ -376,15 +376,17 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
        case L2TP_ENCAPTYPE_IP:
 #if IS_ENABLED(CONFIG_IPV6)
                if (np) {
-                       if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
-                                   &np->saddr) ||
-                           nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(sk->sk_v6_daddr),
-                                   &sk->sk_v6_daddr))
+                       if (nla_put_in6_addr(skb, L2TP_ATTR_IP6_SADDR,
+                                            &np->saddr) ||
+                           nla_put_in6_addr(skb, L2TP_ATTR_IP6_DADDR,
+                                            &sk->sk_v6_daddr))
                                goto nla_put_failure;
                } else
 #endif
-               if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
-                   nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
+               if (nla_put_in_addr(skb, L2TP_ATTR_IP_SADDR,
+                                   inet->inet_saddr) ||
+                   nla_put_in_addr(skb, L2TP_ATTR_IP_DADDR,
+                                   inet->inet_daddr))
                        goto nla_put_failure;
                break;
        }
index cc7a828fc914d7e05a9495b70df4b1411e0de60e..e9b0dec56b8e80e13ea780d11bb56e921814c317 100644 (file)
@@ -185,9 +185,8 @@ static int pppol2tp_recv_payload_hook(struct sk_buff *skb)
 
 /* Receive message. This is the recvmsg for the PPPoL2TP socket.
  */
-static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *msg, size_t len,
-                           int flags)
+static int pppol2tp_recvmsg(struct socket *sock, struct msghdr *msg,
+                           size_t len, int flags)
 {
        int err;
        struct sk_buff *skb;
@@ -295,7 +294,7 @@ static void pppol2tp_session_sock_put(struct l2tp_session *session)
  * when a user application does a sendmsg() on the session socket. L2TP and
  * PPP headers must be inserted into the user's data.
  */
-static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m,
                            size_t total_len)
 {
        static const unsigned char ppph[2] = { 0xff, 0x03 };
index 2c0b83ce43bda478f6c56ebdc0951a54658c9098..17a8dff0609066e338b528328c4ed01259314d43 100644 (file)
@@ -704,8 +704,8 @@ out:
  *     Copy received data to the socket user.
  *     Returns non-negative upon success, negative otherwise.
  */
-static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
-                         struct msghdr *msg, size_t len, int flags)
+static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                         int flags)
 {
        DECLARE_SOCKADDR(struct sockaddr_llc *, uaddr, msg->msg_name);
        const int nonblock = flags & MSG_DONTWAIT;
@@ -878,8 +878,7 @@ copy_uaddr:
  *     Transmit data provided by the socket user.
  *     Returns non-negative upon success, negative otherwise.
  */
-static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
-                         struct msghdr *msg, size_t len)
+static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct llc_sock *llc = llc_sk(sk);
index 7869bb40acaa1acbe60763493a738bf32812bb34..208df7c0b6eaf432343e83aef3914f26a66f251e 100644 (file)
@@ -85,11 +85,15 @@ struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
                return tfm;
 
        err = crypto_aead_setkey(tfm, key, key_len);
-       if (!err)
-               err = crypto_aead_setauthsize(tfm, mic_len);
-       if (!err)
-               return tfm;
+       if (err)
+               goto free_aead;
+       err = crypto_aead_setauthsize(tfm, mic_len);
+       if (err)
+               goto free_aead;
+
+       return tfm;
 
+free_aead:
        crypto_free_aead(tfm);
        return ERR_PTR(err);
 }
index c2bf6698d7384195bca6e6d3952ab98e44e0be3b..fd278bbe1b0db49ef825a025f11488eec7014daa 100644 (file)
@@ -80,11 +80,15 @@ struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
                return tfm;
 
        err = crypto_aead_setkey(tfm, key, key_len);
-       if (!err)
-               err = crypto_aead_setauthsize(tfm, IEEE80211_GCMP_MIC_LEN);
-       if (!err)
-               return tfm;
+       if (err)
+               goto free_aead;
+       err = crypto_aead_setauthsize(tfm, IEEE80211_GCMP_MIC_LEN);
+       if (err)
+               goto free_aead;
+
+       return tfm;
 
+free_aead:
        crypto_free_aead(tfm);
        return ERR_PTR(err);
 }
index 1c72edcb008312bcdf9d63ef1a9122d07f5ab86d..f1321b7d650675b725b3e96bdbf7611d3d0b1b42 100644 (file)
@@ -69,10 +69,10 @@ struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
                return tfm;
 
        err = crypto_aead_setkey(tfm, key, key_len);
-       if (!err)
-               return tfm;
        if (!err)
                err = crypto_aead_setauthsize(tfm, GMAC_MIC_LEN);
+       if (!err)
+               return tfm;
 
        crypto_free_aead(tfm);
        return ERR_PTR(err);
index 7702978a4c999dfd10d9b49792b8b25899e23966..5c564a68fb5088e21ccec11ffdde963bf8bb6cdf 100644 (file)
@@ -238,6 +238,14 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
        int i, ret = -EOPNOTSUPP;
        u16 status = WLAN_STATUS_REQUEST_DECLINED;
 
+       if (!sta->sta.ht_cap.ht_supported) {
+               ht_dbg(sta->sdata,
+                      "STA %pM erroneously requests BA session on tid %d w/o QoS\n",
+                      sta->sta.addr, tid);
+               /* send a response anyway, it's an error case if we get here */
+               goto end_no_lock;
+       }
+
        if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
                ht_dbg(sta->sdata,
                       "Suspend in progress - Denying ADDBA request (%pM tid %d)\n",
index a360c15cc978b0a6c396e0a18a07cdfd5e3154e5..20522492d8cc80028b81dc772d32468951402ac6 100644 (file)
@@ -509,11 +509,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
        struct tid_ampdu_tx *tid_tx;
        int ret = 0;
 
+       trace_api_start_tx_ba_session(pubsta, tid);
+
        if (WARN(sta->reserved_tid == tid,
                 "Requested to start BA session on reserved tid=%d", tid))
                return -EINVAL;
 
-       trace_api_start_tx_ba_session(pubsta, tid);
+       if (!pubsta->ht_cap.ht_supported)
+               return -EINVAL;
 
        if (WARN_ON_ONCE(!local->ops->ampdu_action))
                return -EINVAL;
@@ -793,6 +796,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        struct tid_ampdu_tx *tid_tx;
+       bool send_delba = false;
 
        trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
@@ -824,13 +828,17 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
        }
 
        if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
-               ieee80211_send_delba(sta->sdata, ra, tid,
-                       WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
+               send_delba = true;
 
        ieee80211_remove_tid_tx(sta, tid);
 
  unlock_sta:
        spin_unlock_bh(&sta->lock);
+
+       if (send_delba)
+               ieee80211_send_delba(sdata, ra, tid,
+                       WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
+
        mutex_unlock(&sta->ampdu_mlme.mtx);
  unlock:
        mutex_unlock(&local->sta_mtx);
index dd4ff36c557a44158ef64cd18aa090600fec1faf..265e42721a661cf54a46246065168d6a17885147 100644 (file)
@@ -24,6 +24,7 @@
 
 static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
                                                const char *name,
+                                               unsigned char name_assign_type,
                                                enum nl80211_iftype type,
                                                u32 *flags,
                                                struct vif_params *params)
@@ -33,7 +34,7 @@ static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
        struct ieee80211_sub_if_data *sdata;
        int err;
 
-       err = ieee80211_if_add(local, name, &wdev, type, params);
+       err = ieee80211_if_add(local, name, name_assign_type, &wdev, type, params);
        if (err)
                return ERR_PTR(err);
 
@@ -977,6 +978,14 @@ static int sta_apply_auth_flags(struct ieee80211_local *local,
        if (mask & BIT(NL80211_STA_FLAG_ASSOCIATED) &&
            set & BIT(NL80211_STA_FLAG_ASSOCIATED) &&
            !test_sta_flag(sta, WLAN_STA_ASSOC)) {
+               /*
+                * When peer becomes associated, init rate control as
+                * well. Some drivers require rate control initialized
+                * before drv_sta_state() is called.
+                */
+               if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+                       rate_control_rate_init(sta);
+
                ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
                if (ret)
                        return ret;
@@ -1050,6 +1059,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                }
        }
 
+       if (mask & BIT(NL80211_STA_FLAG_WME) &&
+           local->hw.queues >= IEEE80211_NUM_ACS)
+               sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME);
+
        /* auth flags will be set later for TDLS stations */
        if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
                ret = sta_apply_auth_flags(local, sta, mask, set);
@@ -1064,10 +1077,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                        clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
        }
 
-       if (mask & BIT(NL80211_STA_FLAG_WME))
-               sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME);
-
        if (mask & BIT(NL80211_STA_FLAG_MFP)) {
+               sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP));
                if (set & BIT(NL80211_STA_FLAG_MFP))
                        set_sta_flag(sta, WLAN_STA_MFP);
                else
@@ -1377,11 +1388,6 @@ static int ieee80211_change_station(struct wiphy *wiphy,
        if (err)
                goto out_err;
 
-       /* When peer becomes authorized, init rate control as well */
-       if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
-           test_sta_flag(sta, WLAN_STA_AUTHORIZED))
-               rate_control_rate_init(sta);
-
        mutex_unlock(&local->sta_mtx);
 
        if ((sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -1488,7 +1494,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
        if (next_hop_sta)
                memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN);
        else
-               memset(next_hop, 0, ETH_ALEN);
+               eth_zero_addr(next_hop);
 
        memset(pinfo, 0, sizeof(*pinfo));
 
@@ -2273,7 +2279,6 @@ int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
 {
        struct sta_info *sta;
        enum ieee80211_smps_mode old_req;
-       int i;
 
        if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP))
                return -EINVAL;
@@ -2297,52 +2302,44 @@ int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
        }
 
        ht_dbg(sdata,
-              "SMSP %d requested in AP mode, sending Action frame to %d stations\n",
+              "SMPS %d requested in AP mode, sending Action frame to %d stations\n",
               smps_mode, atomic_read(&sdata->u.ap.num_mcast_sta));
 
        mutex_lock(&sdata->local->sta_mtx);
-       for (i = 0; i < STA_HASH_SIZE; i++) {
-               for (sta = rcu_dereference_protected(sdata->local->sta_hash[i],
-                               lockdep_is_held(&sdata->local->sta_mtx));
-                    sta;
-                    sta = rcu_dereference_protected(sta->hnext,
-                               lockdep_is_held(&sdata->local->sta_mtx))) {
-                       /*
-                        * Only stations associated to our AP and
-                        * associated VLANs
-                        */
-                       if (sta->sdata->bss != &sdata->u.ap)
-                               continue;
+       list_for_each_entry(sta, &sdata->local->sta_list, list) {
+               /*
+                * Only stations associated to our AP and
+                * associated VLANs
+                */
+               if (sta->sdata->bss != &sdata->u.ap)
+                       continue;
 
-                       /* This station doesn't support MIMO - skip it */
-                       if (sta_info_tx_streams(sta) == 1)
-                               continue;
+               /* This station doesn't support MIMO - skip it */
+               if (sta_info_tx_streams(sta) == 1)
+                       continue;
 
-                       /*
-                        * Don't wake up a STA just to send the action frame
-                        * unless we are getting more restrictive.
-                        */
-                       if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
-                           !ieee80211_smps_is_restrictive(sta->known_smps_mode,
-                                                          smps_mode)) {
-                               ht_dbg(sdata,
-                                      "Won't send SMPS to sleeping STA %pM\n",
-                                      sta->sta.addr);
-                               continue;
-                       }
+               /*
+                * Don't wake up a STA just to send the action frame
+                * unless we are getting more restrictive.
+                */
+               if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
+                   !ieee80211_smps_is_restrictive(sta->known_smps_mode,
+                                                  smps_mode)) {
+                       ht_dbg(sdata, "Won't send SMPS to sleeping STA %pM\n",
+                              sta->sta.addr);
+                       continue;
+               }
 
-                       /*
-                        * If the STA is not authorized, wait until it gets
-                        * authorized and the action frame will be sent then.
-                        */
-                       if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
-                               continue;
+               /*
+                * If the STA is not authorized, wait until it gets
+                * authorized and the action frame will be sent then.
+                */
+               if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+                       continue;
 
-                       ht_dbg(sdata, "Sending SMPS to %pM\n", sta->sta.addr);
-                       ieee80211_send_smps_action(sdata, smps_mode,
-                                                  sta->sta.addr,
-                                                  sdata->vif.bss_conf.bssid);
-               }
+               ht_dbg(sdata, "Sending SMPS to %pM\n", sta->sta.addr);
+               ieee80211_send_smps_action(sdata, smps_mode, sta->sta.addr,
+                                          sdata->vif.bss_conf.bssid);
        }
        mutex_unlock(&sdata->local->sta_mtx);
 
@@ -3581,7 +3578,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
                nullfunc->qos_ctrl = cpu_to_le16(7);
 
        local_bh_disable();
-       ieee80211_xmit(sdata, skb);
+       ieee80211_xmit(sdata, sta, skb);
        local_bh_enable();
        rcu_read_unlock();
 
index eeb0bbd69d980a6f3b548baa3d2abda2dfcf88e5..23813ebb349cd67c3f3248da90312695657e686f 100644 (file)
 
 #define DEBUGFS_FORMAT_BUFFER_SIZE 100
 
-#define TX_LATENCY_BIN_DELIMTER_C ','
-#define TX_LATENCY_BIN_DELIMTER_S ","
-#define TX_LATENCY_BINS_DISABLED "enable(bins disabled)\n"
-#define TX_LATENCY_DISABLED "disable\n"
-
-
-/*
- * Display if Tx latency statistics & bins are enabled/disabled
- */
-static ssize_t sta_tx_latency_stat_read(struct file *file,
-                                       char __user *userbuf,
-                                       size_t count, loff_t *ppos)
-{
-       struct ieee80211_local *local = file->private_data;
-       struct ieee80211_tx_latency_bin_ranges  *tx_latency;
-       char *buf;
-       int bufsz, i, ret;
-       int pos = 0;
-
-       rcu_read_lock();
-
-       tx_latency = rcu_dereference(local->tx_latency);
-
-       if (tx_latency && tx_latency->n_ranges) {
-               bufsz = tx_latency->n_ranges * 15;
-               buf = kzalloc(bufsz, GFP_ATOMIC);
-               if (!buf)
-                       goto err;
-
-               for (i = 0; i < tx_latency->n_ranges; i++)
-                       pos += scnprintf(buf + pos, bufsz - pos, "%d,",
-                                        tx_latency->ranges[i]);
-               pos += scnprintf(buf + pos, bufsz - pos, "\n");
-       } else if (tx_latency) {
-               bufsz = sizeof(TX_LATENCY_BINS_DISABLED) + 1;
-               buf = kzalloc(bufsz, GFP_ATOMIC);
-               if (!buf)
-                       goto err;
-
-               pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
-                                TX_LATENCY_BINS_DISABLED);
-       } else {
-               bufsz = sizeof(TX_LATENCY_DISABLED) + 1;
-               buf = kzalloc(bufsz, GFP_ATOMIC);
-               if (!buf)
-                       goto err;
-
-               pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
-                                TX_LATENCY_DISABLED);
-       }
-
-       rcu_read_unlock();
-
-       ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
-       kfree(buf);
-
-       return ret;
-err:
-       rcu_read_unlock();
-       return -ENOMEM;
-}
-
-/*
- * Receive input from user regarding Tx latency statistics
- * The input should indicate if Tx latency statistics and bins are
- * enabled/disabled.
- * If bins are enabled input should indicate the amount of different bins and
- * their ranges. Each bin will count how many Tx frames transmitted within the
- * appropriate latency.
- * Legal input is:
- * a) "enable(bins disabled)" - to enable only general statistics
- * b) "a,b,c,d,...z" - to enable general statistics and bins, where all are
- * numbers and a < b < c < d.. < z
- * c) "disable" - disable all statistics
- * NOTE: must configure Tx latency statistics bins before stations connected.
- */
-
-static ssize_t sta_tx_latency_stat_write(struct file *file,
-                                        const char __user *userbuf,
-                                        size_t count, loff_t *ppos)
-{
-       struct ieee80211_local *local = file->private_data;
-       char buf[128] = {};
-       char *bins = buf;
-       char *token;
-       int buf_size, i, alloc_size;
-       int prev_bin = 0;
-       int n_ranges = 0;
-       int ret = count;
-       struct ieee80211_tx_latency_bin_ranges  *tx_latency;
-
-       if (sizeof(buf) <= count)
-               return -EINVAL;
-       buf_size = count;
-       if (copy_from_user(buf, userbuf, buf_size))
-               return -EFAULT;
-
-       mutex_lock(&local->sta_mtx);
-
-       /* cannot change config once we have stations */
-       if (local->num_sta)
-               goto unlock;
-
-       tx_latency =
-               rcu_dereference_protected(local->tx_latency,
-                                         lockdep_is_held(&local->sta_mtx));
-
-       /* disable Tx statistics */
-       if (!strcmp(buf, TX_LATENCY_DISABLED)) {
-               if (!tx_latency)
-                       goto unlock;
-               RCU_INIT_POINTER(local->tx_latency, NULL);
-               synchronize_rcu();
-               kfree(tx_latency);
-               goto unlock;
-       }
-
-       /* Tx latency already enabled */
-       if (tx_latency)
-               goto unlock;
-
-       if (strcmp(TX_LATENCY_BINS_DISABLED, buf)) {
-               /* check how many bins and between what ranges user requested */
-               token = buf;
-               while (*token != '\0') {
-                       if (*token == TX_LATENCY_BIN_DELIMTER_C)
-                               n_ranges++;
-                       token++;
-               }
-               n_ranges++;
-       }
-
-       alloc_size = sizeof(struct ieee80211_tx_latency_bin_ranges) +
-                    n_ranges * sizeof(u32);
-       tx_latency = kzalloc(alloc_size, GFP_ATOMIC);
-       if (!tx_latency) {
-               ret = -ENOMEM;
-               goto unlock;
-       }
-       tx_latency->n_ranges = n_ranges;
-       for (i = 0; i < n_ranges; i++) { /* setting bin ranges */
-               token = strsep(&bins, TX_LATENCY_BIN_DELIMTER_S);
-               sscanf(token, "%d", &tx_latency->ranges[i]);
-               /* bins values should be in ascending order */
-               if (prev_bin >= tx_latency->ranges[i]) {
-                       ret = -EINVAL;
-                       kfree(tx_latency);
-                       goto unlock;
-               }
-               prev_bin = tx_latency->ranges[i];
-       }
-       rcu_assign_pointer(local->tx_latency, tx_latency);
-
-unlock:
-       mutex_unlock(&local->sta_mtx);
-
-       return ret;
-}
-
-static const struct file_operations stats_tx_latency_ops = {
-       .write = sta_tx_latency_stat_write,
-       .read = sta_tx_latency_stat_read,
-       .open = simple_open,
-       .llseek = generic_file_llseek,
-};
-
 int mac80211_format_buffer(char __user *userbuf, size_t count,
                                  loff_t *ppos, char *fmt, ...)
 {
@@ -440,8 +274,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
 #ifdef CONFIG_MAC80211_DEBUG_COUNTERS
        DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop);
        DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued);
-       DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted,
-               local->tx_handlers_drop_unencrypted);
        DEBUGFS_STATS_ADD(tx_handlers_drop_fragment,
                local->tx_handlers_drop_fragment);
        DEBUGFS_STATS_ADD(tx_handlers_drop_wep,
@@ -475,6 +307,4 @@ void debugfs_hw_add(struct ieee80211_local *local)
        DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount);
        DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount);
        DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount);
-
-       DEBUGFS_DEVSTATS_ADD(tx_latency);
 }
index c68896adfa960113c584656c478fd8ff043a11ca..29236e832e44470a4f13637847c2d1cbafb9bfb2 100644 (file)
@@ -177,7 +177,6 @@ static ssize_t ieee80211_if_write_##name(struct file *file,         \
        IEEE80211_IF_FILE_R(name)
 
 /* common attributes */
-IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
 IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
                  HEX);
 IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
@@ -562,7 +561,6 @@ IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration,
 
 static void add_common_files(struct ieee80211_sub_if_data *sdata)
 {
-       DEBUGFS_ADD(drop_unencrypted);
        DEBUGFS_ADD(rc_rateidx_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
        DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
index 94c70091bbd7e067b5bf1cc313e28d6a0842530e..252859e90e8a5085f18916edda7a3bf407cdd000 100644 (file)
@@ -39,13 +39,6 @@ static const struct file_operations sta_ ##name## _ops = {           \
        .llseek = generic_file_llseek,                                  \
 }
 
-#define STA_OPS_W(name)                                                        \
-static const struct file_operations sta_ ##name## _ops = {             \
-       .write = sta_##name##_write,                                    \
-       .open = simple_open,                                            \
-       .llseek = generic_file_llseek,                                  \
-}
-
 #define STA_OPS_RW(name)                                               \
 static const struct file_operations sta_ ##name## _ops = {             \
        .read = sta_##name##_read,                                      \
@@ -398,131 +391,6 @@ static ssize_t sta_last_rx_rate_read(struct file *file, char __user *userbuf,
 }
 STA_OPS(last_rx_rate);
 
-static int
-sta_tx_latency_stat_header(struct ieee80211_tx_latency_bin_ranges *tx_latency,
-                          char *buf, int pos, int bufsz)
-{
-       int i;
-       int range_count = tx_latency->n_ranges;
-       u32 *bin_ranges = tx_latency->ranges;
-
-       pos += scnprintf(buf + pos, bufsz - pos,
-                         "Station\t\t\tTID\tMax\tAvg");
-       if (range_count) {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                 "\t<=%d", bin_ranges[0]);
-               for (i = 0; i < range_count - 1; i++)
-                       pos += scnprintf(buf + pos, bufsz - pos, "\t%d-%d",
-                                         bin_ranges[i], bin_ranges[i+1]);
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                 "\t%d<", bin_ranges[range_count - 1]);
-       }
-
-       pos += scnprintf(buf + pos, bufsz - pos, "\n");
-
-       return pos;
-}
-
-static int
-sta_tx_latency_stat_table(struct ieee80211_tx_latency_bin_ranges *tx_lat_range,
-                         struct ieee80211_tx_latency_stat *tx_lat,
-                         char *buf, int pos, int bufsz, int tid)
-{
-       u32 avg = 0;
-       int j;
-       int bin_count = tx_lat->bin_count;
-
-       pos += scnprintf(buf + pos, bufsz - pos, "\t\t\t%d", tid);
-       /* make sure you don't divide in 0 */
-       if (tx_lat->counter)
-               avg = tx_lat->sum / tx_lat->counter;
-
-       pos += scnprintf(buf + pos, bufsz - pos, "\t%d\t%d",
-                         tx_lat->max, avg);
-
-       if (tx_lat_range->n_ranges && tx_lat->bins)
-               for (j = 0; j < bin_count; j++)
-                       pos += scnprintf(buf + pos, bufsz - pos,
-                                         "\t%d", tx_lat->bins[j]);
-       pos += scnprintf(buf + pos, bufsz - pos, "\n");
-
-       return pos;
-}
-
-/*
- * Output Tx latency statistics station && restart all statistics information
- */
-static ssize_t sta_tx_latency_stat_read(struct file *file,
-                                       char __user *userbuf,
-                                       size_t count, loff_t *ppos)
-{
-       struct sta_info *sta = file->private_data;
-       struct ieee80211_local *local = sta->local;
-       struct ieee80211_tx_latency_bin_ranges *tx_latency;
-       char *buf;
-       int bufsz, ret, i;
-       int pos = 0;
-
-       bufsz = 20 * IEEE80211_NUM_TIDS *
-               sizeof(struct ieee80211_tx_latency_stat);
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       rcu_read_lock();
-
-       tx_latency = rcu_dereference(local->tx_latency);
-
-       if (!sta->tx_lat) {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                "Tx latency statistics are not enabled\n");
-               goto unlock;
-       }
-
-       pos = sta_tx_latency_stat_header(tx_latency, buf, pos, bufsz);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", sta->sta.addr);
-       for (i = 0; i < IEEE80211_NUM_TIDS; i++)
-               pos = sta_tx_latency_stat_table(tx_latency, &sta->tx_lat[i],
-                                               buf, pos, bufsz, i);
-unlock:
-       rcu_read_unlock();
-
-       ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
-       kfree(buf);
-
-       return ret;
-}
-STA_OPS(tx_latency_stat);
-
-static ssize_t sta_tx_latency_stat_reset_write(struct file *file,
-                                              const char __user *userbuf,
-                                              size_t count, loff_t *ppos)
-{
-       u32 *bins;
-       int bin_count;
-       struct sta_info *sta = file->private_data;
-       int i;
-
-       if (!sta->tx_lat)
-               return -EINVAL;
-
-       for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
-               bins = sta->tx_lat[i].bins;
-               bin_count = sta->tx_lat[i].bin_count;
-
-               sta->tx_lat[i].max = 0;
-               sta->tx_lat[i].sum = 0;
-               sta->tx_lat[i].counter = 0;
-
-               if (bin_count)
-                       memset(bins, 0, bin_count * sizeof(u32));
-       }
-
-       return count;
-}
-STA_OPS_W(tx_latency_stat_reset);
-
 #define DEBUGFS_ADD(name) \
        debugfs_create_file(#name, 0400, \
                sta->debugfs.dir, sta, &sta_ ##name## _ops);
@@ -576,8 +444,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
        DEBUGFS_ADD(last_ack_signal);
        DEBUGFS_ADD(current_tx_rate);
        DEBUGFS_ADD(last_rx_rate);
-       DEBUGFS_ADD(tx_latency_stat);
-       DEBUGFS_ADD(tx_latency_stat_reset);
 
        DEBUGFS_ADD_COUNTER(rx_packets, rx_packets);
        DEBUGFS_ADD_COUNTER(tx_packets, tx_packets);
index fdeda17b8dd223bc852fa747bbfd04474b9d652d..0a39d3db951a4a411448039c0d80190d50f299e2 100644 (file)
@@ -941,13 +941,13 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
        trace_drv_return_void(local);
 }
 
-static inline void drv_rssi_callback(struct ieee80211_local *local,
-                                    struct ieee80211_sub_if_data *sdata,
-                                    const enum ieee80211_rssi_event event)
+static inline void drv_event_callback(struct ieee80211_local *local,
+                                     struct ieee80211_sub_if_data *sdata,
+                                     const struct ieee80211_event *event)
 {
-       trace_drv_rssi_callback(local, sdata, event);
-       if (local->ops->rssi_callback)
-               local->ops->rssi_callback(&local->hw, &sdata->vif, event);
+       trace_drv_event_callback(local, sdata, event);
+       if (local->ops->event_callback)
+               local->ops->event_callback(&local->hw, &sdata->vif, event);
        trace_drv_return_void(local);
 }
 
index ff630be2ca750182fabfce3ea363a44cd24d7928..7a76ce639d58d6071681551e916f0125cf376212 100644 (file)
@@ -252,8 +252,6 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
                break;
        }
 
-       if (bw != sta->sta.bandwidth)
-               changed = true;
        sta->sta.bandwidth = bw;
 
        sta->cur_max_bandwidth =
index b606b53a49a7d92e178eb502f18469bd9da7a5cd..bfef1b2150504fa9ed2a0b4b9a7b4b60e229f1de 100644 (file)
@@ -188,6 +188,16 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
                 */
                pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
                                                 chandef, 0);
+
+               /* add VHT capability and information IEs */
+               if (chandef->width != NL80211_CHAN_WIDTH_20 &&
+                   chandef->width != NL80211_CHAN_WIDTH_40 &&
+                   sband->vht_cap.vht_supported) {
+                       pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
+                                                        sband->vht_cap.cap);
+                       pos = ieee80211_ie_build_vht_oper(pos, &sband->vht_cap,
+                                                         chandef);
+               }
        }
 
        if (local->hw.queues >= IEEE80211_NUM_ACS)
@@ -249,8 +259,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        if (presp)
                kfree_rcu(presp, rcu_head);
 
-       sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
-
        /* make a copy of the chandef, it could be modified below. */
        chandef = *req_chandef;
        chan = chandef.chan;
@@ -417,6 +425,11 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                                        NL80211_CHAN_WIDTH_20_NOHT);
                chandef.width = sdata->u.ibss.chandef.width;
                break;
+       case NL80211_CHAN_WIDTH_80:
+       case NL80211_CHAN_WIDTH_160:
+               chandef = sdata->u.ibss.chandef;
+               chandef.chan = cbss->channel;
+               break;
        default:
                /* fall back to 20 MHz for unsupported modes */
                cfg80211_chandef_create(&chandef, cbss->channel,
@@ -470,22 +483,19 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
        struct beacon_data *presp, *old_presp;
        struct cfg80211_bss *cbss;
        const struct cfg80211_bss_ies *ies;
-       u16 capability;
+       u16 capability = 0;
        u64 tsf;
        int ret = 0;
 
        sdata_assert_lock(sdata);
 
-       capability = WLAN_CAPABILITY_IBSS;
-
        if (ifibss->privacy)
-               capability |= WLAN_CAPABILITY_PRIVACY;
+               capability = WLAN_CAPABILITY_PRIVACY;
 
        cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan,
                                ifibss->bssid, ifibss->ssid,
-                               ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
-                               WLAN_CAPABILITY_PRIVACY,
-                               capability);
+                               ifibss->ssid_len, IEEE80211_BSS_TYPE_IBSS,
+                               IEEE80211_PRIVACY(ifibss->privacy));
 
        if (WARN_ON(!cbss)) {
                ret = -EINVAL;
@@ -525,23 +535,17 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct cfg80211_bss *cbss;
        int err, changed = 0;
-       u16 capability;
 
        sdata_assert_lock(sdata);
 
        /* update cfg80211 bss information with the new channel */
        if (!is_zero_ether_addr(ifibss->bssid)) {
-               capability = WLAN_CAPABILITY_IBSS;
-
-               if (ifibss->privacy)
-                       capability |= WLAN_CAPABILITY_PRIVACY;
-
                cbss = cfg80211_get_bss(sdata->local->hw.wiphy,
                                        ifibss->chandef.chan,
                                        ifibss->bssid, ifibss->ssid,
-                                       ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
-                                       WLAN_CAPABILITY_PRIVACY,
-                                       capability);
+                                       ifibss->ssid_len,
+                                       IEEE80211_BSS_TYPE_IBSS,
+                                       IEEE80211_PRIVACY(ifibss->privacy));
                /* XXX: should not really modify cfg80211 data */
                if (cbss) {
                        cbss->channel = sdata->csa_chandef.chan;
@@ -682,19 +686,13 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
        struct cfg80211_bss *cbss;
        struct beacon_data *presp;
        struct sta_info *sta;
-       u16 capability;
 
        if (!is_zero_ether_addr(ifibss->bssid)) {
-               capability = WLAN_CAPABILITY_IBSS;
-
-               if (ifibss->privacy)
-                       capability |= WLAN_CAPABILITY_PRIVACY;
-
                cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan,
                                        ifibss->bssid, ifibss->ssid,
-                                       ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
-                                       WLAN_CAPABILITY_PRIVACY,
-                                       capability);
+                                       ifibss->ssid_len,
+                                       IEEE80211_BSS_TYPE_IBSS,
+                                       IEEE80211_PRIVACY(ifibss->privacy));
 
                if (cbss) {
                        cfg80211_unlink_bss(local->hw.wiphy, cbss);
@@ -980,110 +978,140 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
                            mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0, 0);
 }
 
-static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
-                                 struct ieee80211_mgmt *mgmt, size_t len,
-                                 struct ieee80211_rx_status *rx_status,
-                                 struct ieee802_11_elems *elems)
+static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
+                                     struct ieee80211_mgmt *mgmt, size_t len,
+                                     struct ieee80211_rx_status *rx_status,
+                                     struct ieee802_11_elems *elems,
+                                     struct ieee80211_channel *channel)
 {
-       struct ieee80211_local *local = sdata->local;
-       struct cfg80211_bss *cbss;
-       struct ieee80211_bss *bss;
        struct sta_info *sta;
-       struct ieee80211_channel *channel;
-       u64 beacon_timestamp, rx_timestamp;
-       u32 supp_rates = 0;
        enum ieee80211_band band = rx_status->band;
        enum nl80211_bss_scan_width scan_width;
+       struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
        bool rates_updated = false;
+       u32 supp_rates = 0;
 
-       channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq);
-       if (!channel)
+       if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
                return;
 
-       if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
-           ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) {
+       if (!ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid))
+               return;
 
-               rcu_read_lock();
-               sta = sta_info_get(sdata, mgmt->sa);
-
-               if (elems->supp_rates) {
-                       supp_rates = ieee80211_sta_get_rates(sdata, elems,
-                                                            band, NULL);
-                       if (sta) {
-                               u32 prev_rates;
-
-                               prev_rates = sta->sta.supp_rates[band];
-                               /* make sure mandatory rates are always added */
-                               scan_width = NL80211_BSS_CHAN_WIDTH_20;
-                               if (rx_status->flag & RX_FLAG_5MHZ)
-                                       scan_width = NL80211_BSS_CHAN_WIDTH_5;
-                               if (rx_status->flag & RX_FLAG_10MHZ)
-                                       scan_width = NL80211_BSS_CHAN_WIDTH_10;
-
-                               sta->sta.supp_rates[band] = supp_rates |
-                                       ieee80211_mandatory_rates(sband,
-                                                                 scan_width);
-                               if (sta->sta.supp_rates[band] != prev_rates) {
-                                       ibss_dbg(sdata,
-                                                "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
-                                                sta->sta.addr, prev_rates,
-                                                sta->sta.supp_rates[band]);
-                                       rates_updated = true;
-                               }
-                       } else {
-                               rcu_read_unlock();
-                               sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
-                                               mgmt->sa, supp_rates);
+       rcu_read_lock();
+       sta = sta_info_get(sdata, mgmt->sa);
+
+       if (elems->supp_rates) {
+               supp_rates = ieee80211_sta_get_rates(sdata, elems,
+                                                    band, NULL);
+               if (sta) {
+                       u32 prev_rates;
+
+                       prev_rates = sta->sta.supp_rates[band];
+                       /* make sure mandatory rates are always added */
+                       scan_width = NL80211_BSS_CHAN_WIDTH_20;
+                       if (rx_status->flag & RX_FLAG_5MHZ)
+                               scan_width = NL80211_BSS_CHAN_WIDTH_5;
+                       if (rx_status->flag & RX_FLAG_10MHZ)
+                               scan_width = NL80211_BSS_CHAN_WIDTH_10;
+
+                       sta->sta.supp_rates[band] = supp_rates |
+                               ieee80211_mandatory_rates(sband, scan_width);
+                       if (sta->sta.supp_rates[band] != prev_rates) {
+                               ibss_dbg(sdata,
+                                        "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
+                                        sta->sta.addr, prev_rates,
+                                        sta->sta.supp_rates[band]);
+                               rates_updated = true;
                        }
+               } else {
+                       rcu_read_unlock();
+                       sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
+                                                    mgmt->sa, supp_rates);
                }
+       }
 
-               if (sta && elems->wmm_info)
-                       sta->sta.wme = true;
-
-               if (sta && elems->ht_operation && elems->ht_cap_elem &&
-                   sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
-                   sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_5 &&
-                   sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_10) {
-                       /* we both use HT */
-                       struct ieee80211_ht_cap htcap_ie;
-                       struct cfg80211_chan_def chandef;
-
-                       ieee80211_ht_oper_to_chandef(channel,
-                                                    elems->ht_operation,
-                                                    &chandef);
-
-                       memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie));
-
-                       /*
-                        * fall back to HT20 if we don't use or use
-                        * the other extension channel
-                        */
-                       if (chandef.center_freq1 !=
-                           sdata->u.ibss.chandef.center_freq1)
-                               htcap_ie.cap_info &=
-                                       cpu_to_le16(~IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-
-                       rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(
-                                               sdata, sband, &htcap_ie, sta);
+       if (sta && elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS)
+               sta->sta.wme = true;
+
+       if (sta && elems->ht_operation && elems->ht_cap_elem &&
+           sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
+           sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_5 &&
+           sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_10) {
+               /* we both use HT */
+               struct ieee80211_ht_cap htcap_ie;
+               struct cfg80211_chan_def chandef;
+               enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
+
+               ieee80211_ht_oper_to_chandef(channel,
+                                            elems->ht_operation,
+                                            &chandef);
+
+               memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie));
+               rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+                                                                  &htcap_ie,
+                                                                  sta);
+
+               if (elems->vht_operation && elems->vht_cap_elem &&
+                   sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20 &&
+                   sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_40) {
+                       /* we both use VHT */
+                       struct ieee80211_vht_cap cap_ie;
+                       struct ieee80211_sta_vht_cap cap = sta->sta.vht_cap;
+
+                       ieee80211_vht_oper_to_chandef(channel,
+                                                     elems->vht_operation,
+                                                     &chandef);
+                       memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
+                       ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+                                                           &cap_ie, sta);
+                       if (memcmp(&cap, &sta->sta.vht_cap, sizeof(cap)))
+                               rates_updated |= true;
                }
 
-               if (sta && rates_updated) {
-                       u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED;
-                       u8 rx_nss = sta->sta.rx_nss;
+               if (bw != sta->sta.bandwidth)
+                       rates_updated |= true;
 
-                       /* Force rx_nss recalculation */
-                       sta->sta.rx_nss = 0;
-                       rate_control_rate_init(sta);
-                       if (sta->sta.rx_nss != rx_nss)
-                               changed |= IEEE80211_RC_NSS_CHANGED;
+               if (!cfg80211_chandef_compatible(&sdata->u.ibss.chandef,
+                                                &chandef))
+                       WARN_ON_ONCE(1);
+       }
 
-                       drv_sta_rc_update(local, sdata, &sta->sta, changed);
-               }
+       if (sta && rates_updated) {
+               u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED;
+               u8 rx_nss = sta->sta.rx_nss;
 
-               rcu_read_unlock();
+               /* Force rx_nss recalculation */
+               sta->sta.rx_nss = 0;
+               rate_control_rate_init(sta);
+               if (sta->sta.rx_nss != rx_nss)
+                       changed |= IEEE80211_RC_NSS_CHANGED;
+
+               drv_sta_rc_update(local, sdata, &sta->sta, changed);
        }
 
+       rcu_read_unlock();
+}
+
+static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
+                                 struct ieee80211_mgmt *mgmt, size_t len,
+                                 struct ieee80211_rx_status *rx_status,
+                                 struct ieee802_11_elems *elems)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct cfg80211_bss *cbss;
+       struct ieee80211_bss *bss;
+       struct ieee80211_channel *channel;
+       u64 beacon_timestamp, rx_timestamp;
+       u32 supp_rates = 0;
+       enum ieee80211_band band = rx_status->band;
+
+       channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq);
+       if (!channel)
+               return;
+
+       ieee80211_update_sta_info(sdata, mgmt, len, rx_status, elems, channel);
+
        bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
                                        channel);
        if (!bss)
@@ -1273,7 +1301,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
 
        scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
        ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
-                                   NULL, scan_width);
+                                   NULL, 0, scan_width);
 }
 
 static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -1304,14 +1332,82 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
 
        if (ifibss->privacy)
                capability |= WLAN_CAPABILITY_PRIVACY;
-       else
-               sdata->drop_unencrypted = 0;
 
        __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
                                  &ifibss->chandef, ifibss->basic_rates,
                                  capability, 0, true);
 }
 
+static unsigned ibss_setup_channels(struct wiphy *wiphy,
+                                   struct ieee80211_channel **channels,
+                                   unsigned int channels_max,
+                                   u32 center_freq, u32 width)
+{
+       struct ieee80211_channel *chan = NULL;
+       unsigned int n_chan = 0;
+       u32 start_freq, end_freq, freq;
+
+       if (width <= 20) {
+               start_freq = center_freq;
+               end_freq = center_freq;
+       } else {
+               start_freq = center_freq - width / 2 + 10;
+               end_freq = center_freq + width / 2 - 10;
+       }
+
+       for (freq = start_freq; freq <= end_freq; freq += 20) {
+               chan = ieee80211_get_channel(wiphy, freq);
+               if (!chan)
+                       continue;
+               if (n_chan >= channels_max)
+                       return n_chan;
+
+               channels[n_chan] = chan;
+               n_chan++;
+       }
+
+       return n_chan;
+}
+
+static unsigned int
+ieee80211_ibss_setup_scan_channels(struct wiphy *wiphy,
+                                  const struct cfg80211_chan_def *chandef,
+                                  struct ieee80211_channel **channels,
+                                  unsigned int channels_max)
+{
+       unsigned int n_chan = 0;
+       u32 width, cf1, cf2 = 0;
+
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_40:
+               width = 40;
+               break;
+       case NL80211_CHAN_WIDTH_80P80:
+               cf2 = chandef->center_freq2;
+               /* fall through */
+       case NL80211_CHAN_WIDTH_80:
+               width = 80;
+               break;
+       case NL80211_CHAN_WIDTH_160:
+               width = 160;
+               break;
+       default:
+               width = 20;
+               break;
+       }
+
+       cf1 = chandef->center_freq1;
+
+       n_chan = ibss_setup_channels(wiphy, channels, channels_max, cf1, width);
+
+       if (cf2)
+               n_chan += ibss_setup_channels(wiphy, &channels[n_chan],
+                                             channels_max - n_chan, cf2,
+                                             width);
+
+       return n_chan;
+}
+
 /*
  * This function is called with state == IEEE80211_IBSS_MLME_SEARCH
  */
@@ -1325,7 +1421,6 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
        const u8 *bssid = NULL;
        enum nl80211_bss_scan_width scan_width;
        int active_ibss;
-       u16 capability;
 
        sdata_assert_lock(sdata);
 
@@ -1335,9 +1430,6 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
        if (active_ibss)
                return;
 
-       capability = WLAN_CAPABILITY_IBSS;
-       if (ifibss->privacy)
-               capability |= WLAN_CAPABILITY_PRIVACY;
        if (ifibss->fixed_bssid)
                bssid = ifibss->bssid;
        if (ifibss->fixed_channel)
@@ -1346,8 +1438,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
                bssid = ifibss->bssid;
        cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid,
                                ifibss->ssid, ifibss->ssid_len,
-                               WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY,
-                               capability);
+                               IEEE80211_BSS_TYPE_IBSS,
+                               IEEE80211_PRIVACY(ifibss->privacy));
 
        if (cbss) {
                struct ieee80211_bss *bss;
@@ -1381,11 +1473,18 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
        /* Selected IBSS not found in current scan results - try to scan */
        if (time_after(jiffies, ifibss->last_scan_completed +
                                        IEEE80211_SCAN_INTERVAL)) {
+               struct ieee80211_channel *channels[8];
+               unsigned int num;
+
                sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
 
+               num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
+                                                        &ifibss->chandef,
+                                                        channels,
+                                                        ARRAY_SIZE(channels));
                scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
                ieee80211_request_ibss_scan(sdata, ifibss->ssid,
-                                           ifibss->ssid_len, chan,
+                                           ifibss->ssid_len, channels, num,
                                            scan_width);
        } else {
                int interval = IEEE80211_SCAN_INTERVAL;
@@ -1742,7 +1841,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
        ieee80211_ibss_disconnect(sdata);
        ifibss->ssid_len = 0;
-       memset(ifibss->bssid, 0, ETH_ALEN);
+       eth_zero_addr(ifibss->bssid);
 
        /* remove beacon */
        kfree(sdata->u.ibss.ie);
index 8d53d65bd2abc2d993ae5dbc7df37b707fd9ec13..487f5e2a9283bd1819adb0f61fead485f7fa6b0f 100644 (file)
@@ -830,8 +830,6 @@ struct ieee80211_sub_if_data {
 
        unsigned long state;
 
-       int drop_unencrypted;
-
        char name[IFNAMSIZ];
 
        /* Fragment table for host-based reassembly */
@@ -1042,24 +1040,6 @@ struct tpt_led_trigger {
 };
 #endif
 
-/*
- * struct ieee80211_tx_latency_bin_ranges - Tx latency statistics bins ranges
- *
- * Measuring Tx latency statistics. Counts how many Tx frames transmitted in a
- * certain latency range (in Milliseconds). Each station that uses these
- * ranges will have bins to count the amount of frames received in that range.
- * The user can configure the ranges via debugfs.
- * If ranges is NULL then Tx latency statistics bins are disabled for all
- * stations.
- *
- * @n_ranges: number of ranges that are taken in account
- * @ranges: the ranges that the user requested or NULL if disabled.
- */
-struct ieee80211_tx_latency_bin_ranges {
-       int n_ranges;
-       u32 ranges[];
-};
-
 /**
  * mac80211 scan flags - currently active scan mode
  *
@@ -1211,12 +1191,6 @@ struct ieee80211_local {
        struct timer_list sta_cleanup;
        int sta_generation;
 
-       /*
-        * Tx latency statistics parameters for all stations.
-        * Can enable via debugfs (NULL when disabled).
-        */
-       struct ieee80211_tx_latency_bin_ranges __rcu *tx_latency;
-
        struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
        struct tasklet_struct tx_pending_tasklet;
 
@@ -1298,7 +1272,6 @@ struct ieee80211_local {
        /* TX/RX handler statistics */
        unsigned int tx_handlers_drop;
        unsigned int tx_handlers_queued;
-       unsigned int tx_handlers_drop_unencrypted;
        unsigned int tx_handlers_drop_fragment;
        unsigned int tx_handlers_drop_wep;
        unsigned int tx_handlers_drop_not_assoc;
@@ -1568,7 +1541,8 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata);
 void ieee80211_scan_work(struct work_struct *work);
 int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
                                const u8 *ssid, u8 ssid_len,
-                               struct ieee80211_channel *chan,
+                               struct ieee80211_channel **channels,
+                               unsigned int n_channels,
                                enum nl80211_bss_scan_width scan_width);
 int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
                           struct cfg80211_scan_request *req);
@@ -1617,6 +1591,7 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
 int ieee80211_iface_init(void);
 void ieee80211_iface_exit(void);
 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+                    unsigned char name_assign_type,
                     struct wireless_dev **new_wdev, enum nl80211_iftype type,
                     struct vif_params *params);
 int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
@@ -1784,7 +1759,8 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
                                     gfp_t gfp);
 void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
                               bool bss_notify);
-void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
+                   struct sta_info *sta, struct sk_buff *skb);
 
 void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
                                 struct sk_buff *skb, int tid,
@@ -1979,6 +1955,8 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
                               u16 prot_mode);
 u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
                               u32 cap);
+u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
+                               const struct cfg80211_chan_def *chandef);
 int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
                             const struct ieee80211_supported_band *sband,
                             const u8 *srates, int srates_len, u32 *rates);
@@ -1994,6 +1972,9 @@ u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
 void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
                                  const struct ieee80211_ht_operation *ht_oper,
                                  struct cfg80211_chan_def *chandef);
+void ieee80211_vht_oper_to_chandef(struct ieee80211_channel *control_chan,
+                                  const struct ieee80211_vht_operation *oper,
+                                  struct cfg80211_chan_def *chandef);
 u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
 
 int __must_check
index 81a27516813e2f3473bec783ef9253b54967e62d..a0cd97fd0c49075d8682fadf734c18f3c018f13f 100644 (file)
@@ -1508,7 +1508,6 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
        }
 
        /* reset some values that shouldn't be kept across type changes */
-       sdata->drop_unencrypted = 0;
        if (type == NL80211_IFTYPE_STATION)
                sdata->u.mgd.use_4addr = false;
 
@@ -1649,6 +1648,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
 }
 
 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+                    unsigned char name_assign_type,
                     struct wireless_dev **new_wdev, enum nl80211_iftype type,
                     struct vif_params *params)
 {
@@ -1677,7 +1677,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                        txqs = IEEE80211_NUM_ACS;
 
                ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
-                                       name, NET_NAME_UNKNOWN,
+                                       name, name_assign_type,
                                        ieee80211_if_setup, txqs, 1);
                if (!ndev)
                        return -ENOMEM;
index 0825d76edcfc81d93c4afa70ec81e157a7ecc2d2..2291cd7300911514db84c0135369b807e93a9d06 100644 (file)
@@ -492,6 +492,7 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
                                for (j = 0; j < len; j++)
                                        key->u.gen.rx_pn[i][j] =
                                                        seq[len - j - 1];
+                       key->flags |= KEY_FLAG_CIPHER_SCHEME;
                }
        }
        memcpy(key->conf.key, key_data, key_len);
index d57a9915494f94eb44bfbf3ed609286d8eb9907f..c5a31835be0e0ca22c154b1345d91be761308833 100644 (file)
@@ -30,10 +30,12 @@ struct sta_info;
  * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present
  *     in the hardware for TX crypto hardware acceleration.
  * @KEY_FLAG_TAINTED: Key is tainted and packets should be dropped.
+ * @KEY_FLAG_CIPHER_SCHEME: This key is for a hardware cipher scheme
  */
 enum ieee80211_internal_key_flags {
        KEY_FLAG_UPLOADED_TO_HARDWARE   = BIT(0),
        KEY_FLAG_TAINTED                = BIT(1),
+       KEY_FLAG_CIPHER_SCHEME          = BIT(2),
 };
 
 enum ieee80211_internal_tkip_state {
index 5e09d354c5a52f25a373740cbd54b8dddaf841df..4977967c8b0076a5960f3fc74e37271583d983fc 100644 (file)
@@ -1057,7 +1057,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        /* add one default STA interface if supported */
        if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
            !(hw->flags & IEEE80211_HW_NO_AUTO_VIF)) {
-               result = ieee80211_if_add(local, "wlan%d", NULL,
+               result = ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, NULL,
                                          NL80211_IFTYPE_STATION, NULL);
                if (result)
                        wiphy_warn(local->hw.wiphy,
@@ -1201,8 +1201,6 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
                     ieee80211_free_ack_frame, NULL);
        idr_destroy(&local->ack_status_frames);
 
-       kfree(rcu_access_pointer(local->tx_latency));
-
        sta_info_stop(local);
 
        wiphy_free(local->hw.wiphy);
index 0c8b2a77d312d5e3ad18f975ce808c44755c820b..d4684242e78bf6de3145f6311dce342af6d47adc 100644 (file)
@@ -520,7 +520,7 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
        } else {
                *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
                /* RA TA DA SA */
-               memset(hdr->addr1, 0, ETH_ALEN);   /* RA is resolved later */
+               eth_zero_addr(hdr->addr1);   /* RA is resolved later */
                memcpy(hdr->addr2, meshsa, ETH_ALEN);
                memcpy(hdr->addr3, meshda, ETH_ALEN);
                memcpy(hdr->addr4, meshsa, ETH_ALEN);
@@ -574,7 +574,8 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        u32 changed;
 
-       ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ);
+       if (ifmsh->mshcfg.plink_timeout > 0)
+               ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ);
        mesh_path_expire(sdata);
 
        changed = mesh_accept_plinks_update(sdata);
index b488e1859b18e8ed7797cffbb5ab2319138fdb28..60d737f144e37563ebfaa9f7c82efafc3e2c9135 100644 (file)
@@ -17,7 +17,7 @@
 #define PLINK_GET_PLID(p) (p + 4)
 
 #define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
-                               jiffies + HZ * t / 1000))
+                               jiffies + msecs_to_jiffies(t)))
 
 enum plink_event {
        PLINK_UNDEFINED,
@@ -382,6 +382,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
        enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
        struct ieee80211_supported_band *sband;
        u32 rates, basic_rates = 0, changed = 0;
+       enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
 
        sband = local->hw.wiphy->bands[band];
        rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
@@ -401,6 +402,9 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
                                              elems->ht_cap_elem, sta))
                changed |= IEEE80211_RC_BW_CHANGED;
 
+       if (bw != sta->sta.bandwidth)
+               changed |= IEEE80211_RC_BW_CHANGED;
+
        /* HT peer is operating 20MHz-only */
        if (elems->ht_operation &&
            !(elems->ht_operation->ht_param &
@@ -621,9 +625,9 @@ static void mesh_plink_timer(unsigned long data)
                                    sta->llid, sta->plid, reason);
 }
 
-static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout)
+static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout)
 {
-       sta->plink_timer.expires = jiffies + (HZ * timeout / 1000);
+       sta->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
        sta->plink_timer.data = (unsigned long) sta;
        sta->plink_timer.function = mesh_plink_timer;
        sta->plink_timeout = timeout;
index 142f66aece18a8789205fc12b0e1d7c7816d0863..00103f36dcbf4f5866c40892cd76882ce8022be8 100644 (file)
@@ -1168,11 +1168,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        if (!conf) {
                sdata_info(sdata,
                           "no channel context assigned to vif?, disconnecting\n");
-               ieee80211_queue_work(&local->hw,
-                                    &ifmgd->csa_connection_drop_work);
-               mutex_unlock(&local->chanctx_mtx);
-               mutex_unlock(&local->mtx);
-               return;
+               goto drop_connection;
        }
 
        chanctx = container_of(conf, struct ieee80211_chanctx, conf);
@@ -1181,11 +1177,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
            !(local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)) {
                sdata_info(sdata,
                           "driver doesn't support chan-switch with channel contexts\n");
-               ieee80211_queue_work(&local->hw,
-                                    &ifmgd->csa_connection_drop_work);
-               mutex_unlock(&local->chanctx_mtx);
-               mutex_unlock(&local->mtx);
-               return;
+               goto drop_connection;
        }
 
        ch_switch.timestamp = timestamp;
@@ -1197,11 +1189,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        if (drv_pre_channel_switch(sdata, &ch_switch)) {
                sdata_info(sdata,
                           "preparing for channel switch failed, disconnecting\n");
-               ieee80211_queue_work(&local->hw,
-                                    &ifmgd->csa_connection_drop_work);
-               mutex_unlock(&local->chanctx_mtx);
-               mutex_unlock(&local->mtx);
-               return;
+               goto drop_connection;
        }
 
        res = ieee80211_vif_reserve_chanctx(sdata, &csa_ie.chandef,
@@ -1210,11 +1198,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                sdata_info(sdata,
                           "failed to reserve channel context for channel switch, disconnecting (err=%d)\n",
                           res);
-               ieee80211_queue_work(&local->hw,
-                                    &ifmgd->csa_connection_drop_work);
-               mutex_unlock(&local->chanctx_mtx);
-               mutex_unlock(&local->mtx);
-               return;
+               goto drop_connection;
        }
        mutex_unlock(&local->chanctx_mtx);
 
@@ -1244,6 +1228,11 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                mod_timer(&ifmgd->chswitch_timer,
                          TU_TO_EXP_TIME((csa_ie.count - 1) *
                                         cbss->beacon_interval));
+       return;
+ drop_connection:
+       ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
+       mutex_unlock(&local->chanctx_mtx);
+       mutex_unlock(&local->mtx);
 }
 
 static bool
@@ -1633,9 +1622,6 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
 {
        struct ieee80211_local *local = (void *) data;
 
-       if (local->quiescing || local->suspended)
-               return;
-
        ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
 }
 
@@ -2045,7 +2031,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
                ieee80211_flush_queues(local, sdata, false);
 
        /* clear bssid only after building the needed mgmt frames */
-       memset(ifmgd->bssid, 0, ETH_ALEN);
+       eth_zero_addr(ifmgd->bssid);
 
        /* remove AP and TDLS peers */
        sta_info_flush(sdata);
@@ -2260,7 +2246,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
                else
                        ssid_len = ssid[1];
 
-               ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
+               ieee80211_send_probe_req(sdata, sdata->vif.addr, dst,
                                         ssid + 2, ssid_len, NULL,
                                         0, (u32) -1, true, 0,
                                         ifmgd->associated->channel, false);
@@ -2372,6 +2358,24 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
 }
 EXPORT_SYMBOL(ieee80211_ap_probereq_get);
 
+static void ieee80211_report_disconnect(struct ieee80211_sub_if_data *sdata,
+                                       const u8 *buf, size_t len, bool tx,
+                                       u16 reason)
+{
+       struct ieee80211_event event = {
+               .type = MLME_EVENT,
+               .u.mlme.data = tx ? DEAUTH_TX_EVENT : DEAUTH_RX_EVENT,
+               .u.mlme.reason = reason,
+       };
+
+       if (tx)
+               cfg80211_tx_mlme_mgmt(sdata->dev, buf, len);
+       else
+               cfg80211_rx_mlme_mgmt(sdata->dev, buf, len);
+
+       drv_event_callback(sdata->local, sdata, &event);
+}
+
 static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
@@ -2397,8 +2401,9 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
        }
        mutex_unlock(&local->mtx);
 
-       cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
-                             IEEE80211_DEAUTH_FRAME_LEN);
+       ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+                                   WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
+
        sdata_unlock(sdata);
 }
 
@@ -2477,7 +2482,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
                del_timer_sync(&sdata->u.mgd.timer);
                sta_info_destroy_addr(sdata, auth_data->bss->bssid);
 
-               memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
+               eth_zero_addr(sdata->u.mgd.bssid);
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
                sdata->u.mgd.flags = 0;
                mutex_lock(&sdata->local->mtx);
@@ -2522,6 +2527,10 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
        u8 bssid[ETH_ALEN];
        u16 auth_alg, auth_transaction, status_code;
        struct sta_info *sta;
+       struct ieee80211_event event = {
+               .type = MLME_EVENT,
+               .u.mlme.data = AUTH_EVENT,
+       };
 
        sdata_assert_lock(sdata);
 
@@ -2554,6 +2563,9 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
                           mgmt->sa, status_code);
                ieee80211_destroy_auth_data(sdata, false);
                cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+               event.u.mlme.status = MLME_DENIED;
+               event.u.mlme.reason = status_code;
+               drv_event_callback(sdata->local, sdata, &event);
                return;
        }
 
@@ -2576,6 +2588,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
                return;
        }
 
+       event.u.mlme.status = MLME_SUCCESS;
+       drv_event_callback(sdata->local, sdata, &event);
        sdata_info(sdata, "authenticated\n");
        ifmgd->auth_data->done = true;
        ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
@@ -2694,7 +2708,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
 
-       cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+       ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code);
 }
 
 
@@ -2720,7 +2734,7 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
 
-       cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+       ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code);
 }
 
 static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
@@ -2790,7 +2804,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
                del_timer_sync(&sdata->u.mgd.timer);
                sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
 
-               memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
+               eth_zero_addr(sdata->u.mgd.bssid);
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
                sdata->u.mgd.flags = 0;
                mutex_lock(&sdata->local->mtx);
@@ -2982,10 +2996,14 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 
        rate_control_rate_init(sta);
 
-       if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
+       if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
                set_sta_flag(sta, WLAN_STA_MFP);
+               sta->sta.mfp = true;
+       } else {
+               sta->sta.mfp = false;
+       }
 
-       sta->sta.wme = elems.wmm_param;
+       sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
 
        err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
        if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
@@ -3055,6 +3073,10 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
        u8 *pos;
        bool reassoc;
        struct cfg80211_bss *bss;
+       struct ieee80211_event event = {
+               .type = MLME_EVENT,
+               .u.mlme.data = ASSOC_EVENT,
+       };
 
        sdata_assert_lock(sdata);
 
@@ -3106,6 +3128,9 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                sdata_info(sdata, "%pM denied association (code=%d)\n",
                           mgmt->sa, status_code);
                ieee80211_destroy_assoc_data(sdata, false);
+               event.u.mlme.status = MLME_DENIED;
+               event.u.mlme.reason = status_code;
+               drv_event_callback(sdata->local, sdata, &event);
        } else {
                if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) {
                        /* oops -- internal error -- send timeout for now */
@@ -3113,6 +3138,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                        cfg80211_assoc_timeout(sdata->dev, bss);
                        return;
                }
+               event.u.mlme.status = MLME_SUCCESS;
+               drv_event_callback(sdata->local, sdata, &event);
                sdata_info(sdata, "associated\n");
 
                /*
@@ -3315,6 +3342,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
            ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
                int sig = ifmgd->ave_beacon_signal;
                int last_sig = ifmgd->last_ave_beacon_signal;
+               struct ieee80211_event event = {
+                       .type = RSSI_EVENT,
+               };
 
                /*
                 * if signal crosses either of the boundaries, invoke callback
@@ -3323,12 +3353,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                if (sig > ifmgd->rssi_max_thold &&
                    (last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) {
                        ifmgd->last_ave_beacon_signal = sig;
-                       drv_rssi_callback(local, sdata, RSSI_EVENT_HIGH);
+                       event.u.rssi.data = RSSI_EVENT_HIGH;
+                       drv_event_callback(local, sdata, &event);
                } else if (sig < ifmgd->rssi_min_thold &&
                           (last_sig >= ifmgd->rssi_max_thold ||
                           last_sig == 0)) {
                        ifmgd->last_ave_beacon_signal = sig;
-                       drv_rssi_callback(local, sdata, RSSI_EVENT_LOW);
+                       event.u.rssi.data = RSSI_EVENT_LOW;
+                       drv_event_callback(local, sdata, &event);
                }
        }
 
@@ -3433,6 +3465,26 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        if (ifmgd->csa_waiting_bcn)
                ieee80211_chswitch_post_beacon(sdata);
 
+       /*
+        * Update beacon timing and dtim count on every beacon appearance. This
+        * will allow the driver to use the most updated values. Do it before
+        * comparing this one with last received beacon.
+        * IMPORTANT: These parameters would possibly be out of sync by the time
+        * the driver will use them. The synchronized view is currently
+        * guaranteed only in certain callbacks.
+        */
+       if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
+               sdata->vif.bss_conf.sync_tsf =
+                       le64_to_cpu(mgmt->u.beacon.timestamp);
+               sdata->vif.bss_conf.sync_device_ts =
+                       rx_status->device_timestamp;
+               if (elems.tim)
+                       sdata->vif.bss_conf.sync_dtim_count =
+                               elems.tim->dtim_count;
+               else
+                       sdata->vif.bss_conf.sync_dtim_count = 0;
+       }
+
        if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
                return;
        ifmgd->beacon_crc = ncrc;
@@ -3460,18 +3512,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                else
                        bss_conf->dtim_period = 1;
 
-               if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
-                       sdata->vif.bss_conf.sync_tsf =
-                               le64_to_cpu(mgmt->u.beacon.timestamp);
-                       sdata->vif.bss_conf.sync_device_ts =
-                               rx_status->device_timestamp;
-                       if (elems.tim)
-                               sdata->vif.bss_conf.sync_dtim_count =
-                                       elems.tim->dtim_count;
-                       else
-                               sdata->vif.bss_conf.sync_dtim_count = 0;
-               }
-
                changed |= BSS_CHANGED_BEACON_INFO;
                ifmgd->have_beacon = true;
 
@@ -3502,8 +3542,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       WLAN_REASON_DEAUTH_LEAVING,
                                       true, deauth_buf);
-               cfg80211_tx_mlme_mgmt(sdata->dev, deauth_buf,
-                                     sizeof(deauth_buf));
+               ieee80211_report_disconnect(sdata, deauth_buf,
+                                           sizeof(deauth_buf), true,
+                                           WLAN_REASON_DEAUTH_LEAVING);
                return;
        }
 
@@ -3621,8 +3662,8 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
                               tx, frame_buf);
 
-       cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
-                             IEEE80211_DEAUTH_FRAME_LEN);
+       ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+                                   reason);
 }
 
 static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
@@ -3816,12 +3857,18 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                        ieee80211_destroy_auth_data(sdata, false);
                } else if (ieee80211_probe_auth(sdata)) {
                        u8 bssid[ETH_ALEN];
+                       struct ieee80211_event event = {
+                               .type = MLME_EVENT,
+                               .u.mlme.data = AUTH_EVENT,
+                               .u.mlme.status = MLME_TIMEOUT,
+                       };
 
                        memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
 
                        ieee80211_destroy_auth_data(sdata, false);
 
                        cfg80211_auth_timeout(sdata->dev, bssid);
+                       drv_event_callback(sdata->local, sdata, &event);
                }
        } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started)
                run_again(sdata, ifmgd->auth_data->timeout);
@@ -3831,9 +3878,15 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                if ((ifmgd->assoc_data->need_beacon && !ifmgd->have_beacon) ||
                    ieee80211_do_assoc(sdata)) {
                        struct cfg80211_bss *bss = ifmgd->assoc_data->bss;
+                       struct ieee80211_event event = {
+                               .type = MLME_EVENT,
+                               .u.mlme.data = ASSOC_EVENT,
+                               .u.mlme.status = MLME_TIMEOUT,
+                       };
 
                        ieee80211_destroy_assoc_data(sdata, false);
                        cfg80211_assoc_timeout(sdata->dev, bss);
+                       drv_event_callback(sdata->local, sdata, &event);
                }
        } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started)
                run_again(sdata, ifmgd->assoc_data->timeout);
@@ -3905,12 +3958,8 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
 {
        struct ieee80211_sub_if_data *sdata =
                (struct ieee80211_sub_if_data *) data;
-       struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-       if (local->quiescing)
-               return;
-
        if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
                return;
 
@@ -3926,9 +3975,6 @@ static void ieee80211_sta_conn_mon_timer(unsigned long data)
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_local *local = sdata->local;
 
-       if (local->quiescing)
-               return;
-
        if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
                return;
 
@@ -3991,6 +4037,34 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
                                      IEEE80211_DEAUTH_FRAME_LEN);
        }
 
+       /* This is a bit of a hack - we should find a better and more generic
+        * solution to this. Normally when suspending, cfg80211 will in fact
+        * deauthenticate. However, it doesn't (and cannot) stop an ongoing
+        * auth (not so important) or assoc (this is the problem) process.
+        *
+        * As a consequence, it can happen that we are in the process of both
+        * associating and suspending, and receive an association response
+        * after cfg80211 has checked if it needs to disconnect, but before
+        * we actually set the flag to drop incoming frames. This will then
+        * cause the workqueue flush to process the association response in
+        * the suspend, resulting in a successful association just before it
+        * tries to remove the interface from the driver, which now though
+        * has a channel context assigned ... this results in issues.
+        *
+        * To work around this (for now) simply deauth here again if we're
+        * now connected.
+        */
+       if (ifmgd->associated && !sdata->local->wowlan) {
+               u8 bssid[ETH_ALEN];
+               struct cfg80211_deauth_request req = {
+                       .reason_code = WLAN_REASON_DEAUTH_LEAVING,
+                       .bssid = bssid,
+               };
+
+               memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
+               ieee80211_mgd_deauth(sdata, &req);
+       }
+
        sdata_unlock(sdata);
 }
 
@@ -4379,6 +4453,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
        } else
                WARN_ON_ONCE(!ether_addr_equal(ifmgd->bssid, cbss->bssid));
 
+       /* Cancel scan to ensure that nothing interferes with connection */
+       if (local->scanning)
+               ieee80211_scan_cancel(local);
+
        return 0;
 }
 
@@ -4467,8 +4545,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
                                       WLAN_REASON_UNSPECIFIED,
                                       false, frame_buf);
 
-               cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
-                                     sizeof(frame_buf));
+               ieee80211_report_disconnect(sdata, frame_buf,
+                                           sizeof(frame_buf), true,
+                                           WLAN_REASON_UNSPECIFIED);
        }
 
        sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
@@ -4488,7 +4567,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        return 0;
 
  err_clear:
-       memset(ifmgd->bssid, 0, ETH_ALEN);
+       eth_zero_addr(ifmgd->bssid);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
        ifmgd->auth_data = NULL;
  err_free:
@@ -4568,8 +4647,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                                       WLAN_REASON_UNSPECIFIED,
                                       false, frame_buf);
 
-               cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
-                                     sizeof(frame_buf));
+               ieee80211_report_disconnect(sdata, frame_buf,
+                                           sizeof(frame_buf), true,
+                                           WLAN_REASON_UNSPECIFIED);
        }
 
        if (ifmgd->auth_data && !ifmgd->auth_data->done) {
@@ -4831,7 +4911,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
        return 0;
  err_clear:
-       memset(ifmgd->bssid, 0, ETH_ALEN);
+       eth_zero_addr(ifmgd->bssid);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
        ifmgd->assoc_data = NULL;
  err_free:
@@ -4859,8 +4939,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                                               req->reason_code, tx,
                                               frame_buf);
                ieee80211_destroy_auth_data(sdata, false);
-               cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
-                                     IEEE80211_DEAUTH_FRAME_LEN);
+               ieee80211_report_disconnect(sdata, frame_buf,
+                                           sizeof(frame_buf), true,
+                                           req->reason_code);
 
                return 0;
        }
@@ -4874,8 +4955,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       req->reason_code, tx, frame_buf);
-               cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
-                                     IEEE80211_DEAUTH_FRAME_LEN);
+               ieee80211_report_disconnect(sdata, frame_buf,
+                                           sizeof(frame_buf), true,
+                                           req->reason_code);
                return 0;
        }
 
@@ -4907,8 +4989,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
                               req->reason_code, !req->local_state_change,
                               frame_buf);
 
-       cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
-                             IEEE80211_DEAUTH_FRAME_LEN);
+       ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+                                   req->reason_code);
 
        return 0;
 }
index ca405b6b686da37a0e966f09e64b972a2d8ec7e2..ac6ad6238e3ad73421996bf375523df1eda164aa 100644 (file)
@@ -59,9 +59,26 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        cancel_work_sync(&local->dynamic_ps_enable_work);
        del_timer_sync(&local->dynamic_ps_timer);
 
-       local->wowlan = wowlan && local->open_count;
+       local->wowlan = wowlan;
        if (local->wowlan) {
-               int err = drv_suspend(local, wowlan);
+               int err;
+
+               /* Drivers don't expect to suspend while some operations like
+                * authenticating or associating are in progress. It doesn't
+                * make sense anyway to accept that, since the authentication
+                * or association would never finish since the driver can't do
+                * that on its own.
+                * Thus, clean up in-progress auth/assoc first.
+                */
+               list_for_each_entry(sdata, &local->interfaces, list) {
+                       if (!ieee80211_sdata_running(sdata))
+                               continue;
+                       if (sdata->vif.type != NL80211_IFTYPE_STATION)
+                               continue;
+                       ieee80211_mgd_quiesce(sdata);
+               }
+
+               err = drv_suspend(local, wowlan);
                if (err < 0) {
                        local->quiescing = false;
                        local->wowlan = false;
@@ -80,6 +97,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                        return err;
                } else if (err > 0) {
                        WARN_ON(err != 1);
+                       /* cfg80211 will call back into mac80211 to disconnect
+                        * all interfaces, allow that to proceed properly
+                        */
+                       ieee80211_wake_queues_by_reason(hw,
+                                       IEEE80211_MAX_QUEUE_MAP,
+                                       IEEE80211_QUEUE_STOP_REASON_SUSPEND,
+                                       false);
                        return err;
                } else {
                        goto suspend;
index 80452cfd2dc59f0e9891d30ea2549fc9e958babd..60698fc7042e5d0568f4d65e8d5a10bb05de9e21 100644 (file)
 #include "rc80211_minstrel.h"
 #include "rc80211_minstrel_ht.h"
 
+#define AVG_AMPDU_SIZE 16
 #define AVG_PKT_SIZE   1200
 
 /* Number of bits for an average sized packet */
-#define MCS_NBITS (AVG_PKT_SIZE << 3)
+#define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3)
 
 /* Number of symbols for a packet with (bps) bits per symbol */
 #define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
@@ -33,7 +34,8 @@
        )
 
 /* Transmit duration for the raw data part of an average sized packet */
-#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
+#define MCS_DURATION(streams, sgi, bps) \
+       (MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) / AVG_AMPDU_SIZE)
 
 #define BW_20                  0
 #define BW_40                  1
index 1eb730bf875272831d44ac62c6e5a18e0a1de977..2cd02278d4d4076c580f9253460ce98f677edfe1 100644 (file)
@@ -1913,8 +1913,7 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
        /* Drop unencrypted frames if key is set. */
        if (unlikely(!ieee80211_has_protected(fc) &&
                     !ieee80211_is_nullfunc(fc) &&
-                    ieee80211_is_data(fc) &&
-                    (rx->key || rx->sdata->drop_unencrypted)))
+                    ieee80211_is_data(fc) && rx->key))
                return -EACCES;
 
        return 0;
@@ -2044,6 +2043,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
        struct sta_info *dsta;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
 
+       dev->stats.rx_packets++;
+       dev->stats.rx_bytes += rx->skb->len;
+
        skb = rx->skb;
        xmit_skb = NULL;
 
@@ -2174,8 +2176,6 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
                        dev_kfree_skb(rx->skb);
                        continue;
                }
-               dev->stats.rx_packets++;
-               dev->stats.rx_bytes += rx->skb->len;
 
                ieee80211_deliver_skb(rx);
        }
@@ -2401,9 +2401,6 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 
        rx->skb->dev = dev;
 
-       dev->stats.rx_packets++;
-       dev->stats.rx_bytes += rx->skb->len;
-
        if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
            !is_multicast_ether_addr(
                    ((struct ethhdr *)rx->skb->data)->h_dest) &&
@@ -3129,6 +3126,12 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
                        goto rxh_next;  \
        } while (0);
 
+       /* Lock here to avoid hitting all of the data used in the RX
+        * path (e.g. key data, station data, ...) concurrently when
+        * a frame is released from the reorder buffer due to timeout
+        * from the timer, potentially concurrently with RX from the
+        * driver.
+        */
        spin_lock_bh(&rx->local->rx_path_lock);
 
        while ((skb = __skb_dequeue(frames))) {
index 05f0d711b6d8666701e91262141fb67711d9dad7..7bb6a9383f58ec01b3f9c68964aac3f44f3e5beb 100644 (file)
@@ -928,11 +928,12 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
 
 int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
                                const u8 *ssid, u8 ssid_len,
-                               struct ieee80211_channel *chan,
+                               struct ieee80211_channel **channels,
+                               unsigned int n_channels,
                                enum nl80211_bss_scan_width scan_width)
 {
        struct ieee80211_local *local = sdata->local;
-       int ret = -EBUSY;
+       int ret = -EBUSY, i, n_ch = 0;
        enum ieee80211_band band;
 
        mutex_lock(&local->mtx);
@@ -942,9 +943,8 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
                goto unlock;
 
        /* fill internal scan request */
-       if (!chan) {
-               int i, max_n;
-               int n_ch = 0;
+       if (!channels) {
+               int max_n;
 
                for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
                        if (!local->hw.wiphy->bands[band])
@@ -969,12 +969,19 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
 
                local->int_scan_req->n_channels = n_ch;
        } else {
-               if (WARN_ON_ONCE(chan->flags & (IEEE80211_CHAN_NO_IR |
-                                               IEEE80211_CHAN_DISABLED)))
+               for (i = 0; i < n_channels; i++) {
+                       if (channels[i]->flags & (IEEE80211_CHAN_NO_IR |
+                                                 IEEE80211_CHAN_DISABLED))
+                               continue;
+
+                       local->int_scan_req->channels[n_ch] = channels[i];
+                       n_ch++;
+               }
+
+               if (WARN_ON_ONCE(n_ch == 0))
                        goto unlock;
 
-               local->int_scan_req->channels[0] = chan;
-               local->int_scan_req->n_channels = 1;
+               local->int_scan_req->n_channels = n_ch;
        }
 
        local->int_scan_req->ssids = &local->scan_ssid;
index 00ca8dcc2bcf2d924fb24ed0d4ee674295086aff..aacaa1a85e636c4b31df4540c680508be33cdb42 100644 (file)
@@ -229,17 +229,9 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
  */
 void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
 {
-       int i;
-
        if (sta->rate_ctrl)
                rate_control_free_sta(sta);
 
-       if (sta->tx_lat) {
-               for (i = 0; i < IEEE80211_NUM_TIDS; i++)
-                       kfree(sta->tx_lat[i].bins);
-               kfree(sta->tx_lat);
-       }
-
        sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
 
        kfree(rcu_dereference_raw(sta->sta.rates));
@@ -295,42 +287,12 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        struct timespec uptime;
-       struct ieee80211_tx_latency_bin_ranges *tx_latency;
        int i;
 
        sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp);
        if (!sta)
                return NULL;
 
-       rcu_read_lock();
-       tx_latency = rcu_dereference(local->tx_latency);
-       /* init stations Tx latency statistics && TID bins */
-       if (tx_latency) {
-               sta->tx_lat = kzalloc(IEEE80211_NUM_TIDS *
-                                     sizeof(struct ieee80211_tx_latency_stat),
-                                     GFP_ATOMIC);
-               if (!sta->tx_lat) {
-                       rcu_read_unlock();
-                       goto free;
-               }
-
-               if (tx_latency->n_ranges) {
-                       for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
-                               /* size of bins is size of the ranges +1 */
-                               sta->tx_lat[i].bin_count =
-                                       tx_latency->n_ranges + 1;
-                               sta->tx_lat[i].bins =
-                                       kcalloc(sta->tx_lat[i].bin_count,
-                                               sizeof(u32), GFP_ATOMIC);
-                               if (!sta->tx_lat[i].bins) {
-                                       rcu_read_unlock();
-                                       goto free;
-                               }
-                       }
-               }
-       }
-       rcu_read_unlock();
-
        spin_lock_init(&sta->lock);
        spin_lock_init(&sta->ps_lock);
        INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
@@ -359,8 +321,10 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
                ewma_init(&sta->chain_signal_avg[i], 1024, 8);
 
-       if (sta_prepare_rate_control(local, sta, gfp))
-               goto free;
+       if (sta_prepare_rate_control(local, sta, gfp)) {
+               kfree(sta);
+               return NULL;
+       }
 
        for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
                /*
@@ -405,16 +369,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        }
 
        sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
-       return sta;
 
-free:
-       if (sta->tx_lat) {
-               for (i = 0; i < IEEE80211_NUM_TIDS; i++)
-                       kfree(sta->tx_lat[i].bins);
-               kfree(sta->tx_lat);
-       }
-       kfree(sta);
-       return NULL;
+       return sta;
 }
 
 static int sta_info_insert_check(struct sta_info *sta)
@@ -1275,7 +1231,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
        }
 
        info->band = chanctx_conf->def.chan->band;
-       ieee80211_xmit(sdata, skb);
+       ieee80211_xmit(sdata, sta, skb);
        rcu_read_unlock();
 }
 
index fb0fc1302a588480cae6649e2e671ffa719de36b..7e2fa4018d41331cc3845639eb7dfc497c13bc27 100644 (file)
@@ -236,25 +236,6 @@ struct sta_ampdu_mlme {
        u8 dialog_token_allocator;
 };
 
-/*
- * struct ieee80211_tx_latency_stat - Tx latency statistics
- *
- * Measures TX latency and jitter for a station per TID.
- *
- * @max: worst case latency
- * @sum: sum of all latencies
- * @counter: amount of Tx frames sent from interface
- * @bins: each bin counts how many frames transmitted within a certain
- * latency range. when disabled it is NULL.
- * @bin_count: amount of bins.
- */
-struct ieee80211_tx_latency_stat {
-       u32 max;
-       u32 sum;
-       u32 counter;
-       u32 *bins;
-       u32 bin_count;
-};
 
 /* Value to indicate no TID reservation */
 #define IEEE80211_TID_UNRESERVED       0xff
@@ -316,7 +297,6 @@ struct ieee80211_tx_latency_stat {
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
- * @tx_lat: Tx latency statistics
  * @llid: Local link ID
  * @plid: Peer link ID
  * @reason: Cancel reason on PLINK_HOLDING state
@@ -437,8 +417,6 @@ struct sta_info {
        struct sta_ampdu_mlme ampdu_mlme;
        u8 timer_to_tid[IEEE80211_NUM_TIDS];
 
-       struct ieee80211_tx_latency_stat *tx_lat;
-
 #ifdef CONFIG_MAC80211_MESH
        /*
         * Mesh peer link attributes
index e679b7c9b1608747d827d9f781ce06be2810b734..2c51742428d59ebda73b2819b8bfc4391128d36c 100644 (file)
@@ -12,7 +12,6 @@
 
 #include <linux/export.h>
 #include <linux/etherdevice.h>
-#include <linux/time.h>
 #include <net/mac80211.h>
 #include <asm/unaligned.h>
 #include "ieee80211_i.h"
@@ -514,73 +513,6 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
        }
 }
 
-/*
- * Measure Tx frame completion and removal time for Tx latency statistics
- * calculation. A single Tx frame latency should be measured from when it
- * is entering the Kernel until we receive Tx complete confirmation indication
- * and remove the skb.
- */
-static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
-                                           struct sk_buff *skb,
-                                           struct sta_info *sta,
-                                           struct ieee80211_hdr *hdr)
-{
-       u32 msrmnt;
-       u16 tid;
-       u8 *qc;
-       int i, bin_range_count;
-       u32 *bin_ranges;
-       __le16 fc;
-       struct ieee80211_tx_latency_stat *tx_lat;
-       struct ieee80211_tx_latency_bin_ranges *tx_latency;
-       ktime_t skb_arv = skb->tstamp;
-
-       tx_latency = rcu_dereference(local->tx_latency);
-
-       /* assert Tx latency stats are enabled & frame arrived when enabled */
-       if (!tx_latency || !ktime_to_ns(skb_arv))
-               return;
-
-       fc = hdr->frame_control;
-
-       if (!ieee80211_is_data(fc)) /* make sure it is a data frame */
-               return;
-
-       /* get frame tid */
-       if (ieee80211_is_data_qos(hdr->frame_control)) {
-               qc = ieee80211_get_qos_ctl(hdr);
-               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-       } else {
-               tid = 0;
-       }
-
-       tx_lat = &sta->tx_lat[tid];
-
-       /* Calculate the latency */
-       msrmnt = ktime_to_ms(ktime_sub(ktime_get(), skb_arv));
-
-       if (tx_lat->max < msrmnt) /* update stats */
-               tx_lat->max = msrmnt;
-       tx_lat->counter++;
-       tx_lat->sum += msrmnt;
-
-       if (!tx_lat->bins) /* bins not activated */
-               return;
-
-       /* count how many Tx frames transmitted with the appropriate latency */
-       bin_range_count = tx_latency->n_ranges;
-       bin_ranges = tx_latency->ranges;
-
-       for (i = 0; i < bin_range_count; i++) {
-               if (msrmnt <= bin_ranges[i]) {
-                       tx_lat->bins[i]++;
-                       break;
-               }
-       }
-       if (i == bin_range_count) /* msrmnt is bigger than the biggest range */
-               tx_lat->bins[i]++;
-}
-
 /*
  * Use a static threshold for now, best value to be determined
  * by testing ...
@@ -853,12 +785,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 
                if (acked)
                        sta->last_ack_signal = info->status.ack_signal;
-
-               /*
-                * Measure frame removal for tx latency
-                * statistics calculation
-                */
-               ieee80211_tx_latency_end_msrmnt(local, skb, sta, hdr);
        }
 
        rcu_read_unlock();
index c9f9752217ac8230056e90e28a9b0b02883a87d5..fff0d864adfa601da2af75f226c6d5c00affb335 100644 (file)
@@ -136,6 +136,24 @@ ieee80211_tdls_add_supp_channels(struct ieee80211_sub_if_data *sdata,
        *pos = 2 * subband_cnt;
 }
 
+static void ieee80211_tdls_add_oper_classes(struct ieee80211_sub_if_data *sdata,
+                                           struct sk_buff *skb)
+{
+       u8 *pos;
+       u8 op_class;
+
+       if (!ieee80211_chandef_to_operating_class(&sdata->vif.bss_conf.chandef,
+                                                 &op_class))
+               return;
+
+       pos = skb_put(skb, 4);
+       *pos++ = WLAN_EID_SUPPORTED_REGULATORY_CLASSES;
+       *pos++ = 2; /* len */
+
+       *pos++ = op_class;
+       *pos++ = op_class; /* give current operating class as alternate too */
+}
+
 static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb)
 {
        u8 *pos = (void *)skb_put(skb, 3);
@@ -193,6 +211,17 @@ static void ieee80211_tdls_add_link_ie(struct ieee80211_sub_if_data *sdata,
        memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN);
 }
 
+static void
+ieee80211_tdls_add_aid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+{
+       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+       u8 *pos = (void *)skb_put(skb, 4);
+
+       *pos++ = WLAN_EID_AID;
+       *pos++ = 2; /* len */
+       put_unaligned_le16(ifmgd->aid, pos);
+}
+
 /* translate numbering in the WMM parameter IE to the mac80211 notation */
 static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac)
 {
@@ -271,21 +300,11 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
        struct ieee80211_sta_ht_cap ht_cap;
+       struct ieee80211_sta_vht_cap vht_cap;
        struct sta_info *sta = NULL;
        size_t offset = 0, noffset;
        u8 *pos;
 
-       rcu_read_lock();
-
-       /* we should have the peer STA if we're already responding */
-       if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
-               sta = sta_info_get(sdata, peer);
-               if (WARN_ON_ONCE(!sta)) {
-                       rcu_read_unlock();
-                       return;
-               }
-       }
-
        ieee80211_add_srates_ie(sdata, skb, false, band);
        ieee80211_add_ext_srates_ie(sdata, skb, false, band);
        ieee80211_tdls_add_supp_channels(sdata, skb);
@@ -338,6 +357,19 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                offset = noffset;
        }
 
+       rcu_read_lock();
+
+       /* we should have the peer STA if we're already responding */
+       if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
+               sta = sta_info_get(sdata, peer);
+               if (WARN_ON_ONCE(!sta)) {
+                       rcu_read_unlock();
+                       return;
+               }
+       }
+
+       ieee80211_tdls_add_oper_classes(sdata, skb);
+
        /*
         * with TDLS we can switch channels, and HT-caps are not necessarily
         * the same on all bands. The specification limits the setup to a
@@ -346,7 +378,9 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
        sband = local->hw.wiphy->bands[band];
        memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
 
-       if (action_code == WLAN_TDLS_SETUP_REQUEST && ht_cap.ht_supported) {
+       if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
+            action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) &&
+           ht_cap.ht_supported) {
                ieee80211_apply_htcap_overrides(sdata, &ht_cap);
 
                /* disable SMPS in TDLS initiator */
@@ -368,12 +402,63 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
        }
 
-       rcu_read_unlock();
-
        if (ht_cap.ht_supported &&
            (ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
                ieee80211_tdls_add_bss_coex_ie(skb);
 
+       ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+
+       /* add any custom IEs that go before VHT capabilities */
+       if (extra_ies_len) {
+               static const u8 before_vht_cap[] = {
+                       WLAN_EID_SUPP_RATES,
+                       WLAN_EID_COUNTRY,
+                       WLAN_EID_EXT_SUPP_RATES,
+                       WLAN_EID_SUPPORTED_CHANNELS,
+                       WLAN_EID_RSN,
+                       WLAN_EID_EXT_CAPABILITY,
+                       WLAN_EID_QOS_CAPA,
+                       WLAN_EID_FAST_BSS_TRANSITION,
+                       WLAN_EID_TIMEOUT_INTERVAL,
+                       WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+                       WLAN_EID_MULTI_BAND,
+               };
+               noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
+                                            before_vht_cap,
+                                            ARRAY_SIZE(before_vht_cap),
+                                            offset);
+               pos = skb_put(skb, noffset - offset);
+               memcpy(pos, extra_ies + offset, noffset - offset);
+               offset = noffset;
+       }
+
+       /* build the VHT-cap similarly to the HT-cap */
+       memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+       if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
+            action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) &&
+           vht_cap.vht_supported) {
+               ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
+
+               /* the AID is present only when VHT is implemented */
+               if (action_code == WLAN_TDLS_SETUP_REQUEST)
+                       ieee80211_tdls_add_aid(sdata, skb);
+
+               pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
+               ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
+       } else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
+                  vht_cap.vht_supported && sta->sta.vht_cap.vht_supported) {
+               /* the peer caps are already intersected with our own */
+               memcpy(&vht_cap, &sta->sta.vht_cap, sizeof(vht_cap));
+
+               /* the AID is present only when VHT is implemented */
+               ieee80211_tdls_add_aid(sdata, skb);
+
+               pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
+               ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
+       }
+
+       rcu_read_unlock();
+
        /* add any remaining IEs */
        if (extra_ies_len) {
                noffset = extra_ies_len;
@@ -381,7 +466,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                memcpy(pos, extra_ies + offset, noffset - offset);
        }
 
-       ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
 }
 
 static void
@@ -394,6 +478,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        size_t offset = 0, noffset;
        struct sta_info *sta, *ap_sta;
+       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
        u8 *pos;
 
        rcu_read_lock();
@@ -453,6 +538,21 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
                }
        }
 
+       ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+
+       /* only include VHT-operation if not on the 2.4GHz band */
+       if (band != IEEE80211_BAND_2GHZ && !ap_sta->sta.vht_cap.vht_supported &&
+           sta->sta.vht_cap.vht_supported) {
+               struct ieee80211_chanctx_conf *chanctx_conf =
+                               rcu_dereference(sdata->vif.chanctx_conf);
+               if (!WARN_ON(!chanctx_conf)) {
+                       pos = skb_put(skb, 2 +
+                                     sizeof(struct ieee80211_vht_operation));
+                       ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
+                                                   &chanctx_conf->def);
+               }
+       }
+
        rcu_read_unlock();
 
        /* add any remaining IEs */
@@ -461,8 +561,6 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
                pos = skb_put(skb, noffset - offset);
                memcpy(pos, extra_ies + offset, noffset - offset);
        }
-
-       ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
 }
 
 static void
@@ -708,8 +806,12 @@ ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata,
                               26 + /* max(WMM-info, WMM-param) */
                               2 + max(sizeof(struct ieee80211_ht_cap),
                                       sizeof(struct ieee80211_ht_operation)) +
+                              2 + max(sizeof(struct ieee80211_vht_cap),
+                                      sizeof(struct ieee80211_vht_operation)) +
                               50 + /* supported channels */
                               3 + /* 40/20 BSS coex */
+                              4 + /* AID */
+                              4 + /* oper classes */
                               extra_ies_len +
                               sizeof(struct ieee80211_tdls_lnkie));
        if (!skb)
@@ -907,7 +1009,7 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
        if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer) &&
            !ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) {
                ret = -EBUSY;
-               goto exit;
+               goto out_unlock;
        }
 
        /*
@@ -922,27 +1024,34 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
                if (!sta_info_get(sdata, peer)) {
                        rcu_read_unlock();
                        ret = -ENOLINK;
-                       goto exit;
+                       goto out_unlock;
                }
                rcu_read_unlock();
        }
 
        ieee80211_flush_queues(local, sdata, false);
+       memcpy(sdata->u.mgd.tdls_peer, peer, ETH_ALEN);
+       mutex_unlock(&local->mtx);
 
+       /* we cannot take the mutex while preparing the setup packet */
        ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
                                              dialog_token, status_code,
                                              peer_capability, initiator,
                                              extra_ies, extra_ies_len, 0,
                                              NULL);
-       if (ret < 0)
-               goto exit;
+       if (ret < 0) {
+               mutex_lock(&local->mtx);
+               eth_zero_addr(sdata->u.mgd.tdls_peer);
+               mutex_unlock(&local->mtx);
+               return ret;
+       }
 
-       memcpy(sdata->u.mgd.tdls_peer, peer, ETH_ALEN);
        ieee80211_queue_delayed_work(&sdata->local->hw,
                                     &sdata->u.mgd.tdls_peer_del_work,
                                     TDLS_PEER_SETUP_TIMEOUT);
+       return 0;
 
-exit:
+out_unlock:
        mutex_unlock(&local->mtx);
        return ret;
 }
index 263a9561eb2669e636cffd02fb0acc304fede568..e9e462b349e5f828841398ec869c4ec2bf56e6eb 100644 (file)
@@ -1256,28 +1256,28 @@ TRACE_EVENT(drv_set_rekey_data,
                  LOCAL_PR_ARG, VIF_PR_ARG)
 );
 
-TRACE_EVENT(drv_rssi_callback,
+TRACE_EVENT(drv_event_callback,
        TP_PROTO(struct ieee80211_local *local,
                 struct ieee80211_sub_if_data *sdata,
-                enum ieee80211_rssi_event rssi_event),
+                const struct ieee80211_event *_event),
 
-       TP_ARGS(local, sdata, rssi_event),
+       TP_ARGS(local, sdata, _event),
 
        TP_STRUCT__entry(
                LOCAL_ENTRY
                VIF_ENTRY
-               __field(u32, rssi_event)
+               __field(u32, type)
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
                VIF_ASSIGN;
-               __entry->rssi_event = rssi_event;
+               __entry->type = _event->type;
        ),
 
        TP_printk(
-               LOCAL_PR_FMT VIF_PR_FMT " rssi_event:%d",
-               LOCAL_PR_ARG, VIF_PR_ARG, __entry->rssi_event
+               LOCAL_PR_FMT VIF_PR_FMT " event:%d",
+               LOCAL_PR_ARG, VIF_PR_ARG, __entry->type
        )
 );
 
index 07bd8db00af84b820139c644da95eaf29e474b5f..9f7fb4eec37bde2af4de010e474561513709ff29 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/bitmap.h>
 #include <linux/rcupdate.h>
 #include <linux/export.h>
-#include <linux/time.h>
 #include <net/net_namespace.h>
 #include <net/ieee80211_radiotap.h>
 #include <net/cfg80211.h>
@@ -595,23 +594,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
        else if (!is_multicast_ether_addr(hdr->addr1) &&
                 (key = rcu_dereference(tx->sdata->default_unicast_key)))
                tx->key = key;
-       else if (info->flags & IEEE80211_TX_CTL_INJECTED)
-               tx->key = NULL;
-       else if (!tx->sdata->drop_unencrypted)
-               tx->key = NULL;
-       else if (tx->skb->protocol == tx->sdata->control_port_protocol)
-               tx->key = NULL;
-       else if (ieee80211_is_robust_mgmt_frame(tx->skb) &&
-                !(ieee80211_is_action(hdr->frame_control) &&
-                  tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))
-               tx->key = NULL;
-       else if (ieee80211_is_mgmt(hdr->frame_control) &&
-                !ieee80211_is_robust_mgmt_frame(tx->skb))
+       else
                tx->key = NULL;
-       else {
-               I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
-               return TX_DROP;
-       }
 
        if (tx->key) {
                bool skip_hw = false;
@@ -1137,11 +1121,13 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
 
 /*
  * initialises @tx
+ * pass %NULL for the station if unknown, a valid pointer if known
+ * or an ERR_PTR() if the station is known not to exist
  */
 static ieee80211_tx_result
 ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
                     struct ieee80211_tx_data *tx,
-                    struct sk_buff *skb)
+                    struct sta_info *sta, struct sk_buff *skb)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_hdr *hdr;
@@ -1164,17 +1150,22 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 
        hdr = (struct ieee80211_hdr *) skb->data;
 
-       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
-               tx->sta = rcu_dereference(sdata->u.vlan.sta);
-               if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
-                       return TX_DROP;
-       } else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
-                                 IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
-                  tx->sdata->control_port_protocol == tx->skb->protocol) {
-               tx->sta = sta_info_get_bss(sdata, hdr->addr1);
+       if (likely(sta)) {
+               if (!IS_ERR(sta))
+                       tx->sta = sta;
+       } else {
+               if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+                       tx->sta = rcu_dereference(sdata->u.vlan.sta);
+                       if (!tx->sta && sdata->wdev.use_4addr)
+                               return TX_DROP;
+               } else if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+                                         IEEE80211_TX_CTL_INJECTED) ||
+                          tx->sdata->control_port_protocol == tx->skb->protocol) {
+                       tx->sta = sta_info_get_bss(sdata, hdr->addr1);
+               }
+               if (!tx->sta && !is_multicast_ether_addr(hdr->addr1))
+                       tx->sta = sta_info_get(sdata, hdr->addr1);
        }
-       if (!tx->sta)
-               tx->sta = sta_info_get(sdata, hdr->addr1);
 
        if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
            !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
@@ -1422,8 +1413,9 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_data tx;
+       struct sk_buff *skb2;
 
-       if (ieee80211_tx_prepare(sdata, &tx, skb) == TX_DROP)
+       if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP)
                return false;
 
        info->band = band;
@@ -1440,6 +1432,14 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
                        *sta = NULL;
        }
 
+       /* this function isn't suitable for fragmented data frames */
+       skb2 = __skb_dequeue(&tx.skbs);
+       if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) {
+               ieee80211_free_txskb(hw, skb2);
+               ieee80211_purge_tx_queue(hw, &tx.skbs);
+               return false;
+       }
+
        return true;
 }
 EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
@@ -1448,7 +1448,8 @@ EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
  * Returns false if the frame couldn't be transmitted but was queued instead.
  */
 static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
-                        struct sk_buff *skb, bool txpending)
+                        struct sta_info *sta, struct sk_buff *skb,
+                        bool txpending)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_tx_data tx;
@@ -1464,7 +1465,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
 
        /* initialises tx */
        led_len = skb->len;
-       res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
+       res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);
 
        if (unlikely(res_prepare == TX_DROP)) {
                ieee80211_free_txskb(&local->hw, skb);
@@ -1520,7 +1521,8 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
        return 0;
 }
 
-void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
+                   struct sta_info *sta, struct sk_buff *skb)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1555,7 +1557,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
        }
 
        ieee80211_set_qos_hdr(sdata, skb);
-       ieee80211_tx(sdata, skb, false);
+       ieee80211_tx(sdata, sta, skb, false);
 }
 
 static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
@@ -1776,7 +1778,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
                goto fail_rcu;
 
        info->band = chandef->chan->band;
-       ieee80211_xmit(sdata, skb);
+       ieee80211_xmit(sdata, NULL, skb);
        rcu_read_unlock();
 
        return NETDEV_TX_OK;
@@ -1788,21 +1790,89 @@ fail:
        return NETDEV_TX_OK; /* meaning, we dealt with the skb */
 }
 
-/*
- * Measure Tx frame arrival time for Tx latency statistics calculation
- * A single Tx frame latency should be measured from when it is entering the
- * Kernel until we receive Tx complete confirmation indication and the skb is
- * freed.
- */
-static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local,
-                                             struct sk_buff *skb)
+static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb)
 {
-       struct ieee80211_tx_latency_bin_ranges *tx_latency;
+       u16 ethertype = (skb->data[12] << 8) | skb->data[13];
 
-       tx_latency = rcu_dereference(local->tx_latency);
-       if (!tx_latency)
-               return;
-       skb->tstamp = ktime_get();
+       return ethertype == ETH_P_TDLS &&
+              skb->len > 14 &&
+              skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
+}
+
+static int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
+                                  struct sk_buff *skb,
+                                  struct sta_info **sta_out)
+{
+       struct sta_info *sta;
+
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_AP_VLAN:
+               sta = rcu_dereference(sdata->u.vlan.sta);
+               if (sta) {
+                       *sta_out = sta;
+                       return 0;
+               } else if (sdata->wdev.use_4addr) {
+                       return -ENOLINK;
+               }
+               /* fall through */
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_OCB:
+       case NL80211_IFTYPE_ADHOC:
+               if (is_multicast_ether_addr(skb->data)) {
+                       *sta_out = ERR_PTR(-ENOENT);
+                       return 0;
+               }
+               sta = sta_info_get_bss(sdata, skb->data);
+               break;
+       case NL80211_IFTYPE_WDS:
+               sta = sta_info_get(sdata, sdata->u.wds.remote_addr);
+               break;
+#ifdef CONFIG_MAC80211_MESH
+       case NL80211_IFTYPE_MESH_POINT:
+               /* determined much later */
+               *sta_out = NULL;
+               return 0;
+#endif
+       case NL80211_IFTYPE_STATION:
+               if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+                       sta = sta_info_get(sdata, skb->data);
+                       if (sta) {
+                               bool tdls_peer, tdls_auth;
+
+                               tdls_peer = test_sta_flag(sta,
+                                                         WLAN_STA_TDLS_PEER);
+                               tdls_auth = test_sta_flag(sta,
+                                               WLAN_STA_TDLS_PEER_AUTH);
+
+                               if (tdls_peer && tdls_auth) {
+                                       *sta_out = sta;
+                                       return 0;
+                               }
+
+                               /*
+                                * TDLS link during setup - throw out frames to
+                                * peer. Allow TDLS-setup frames to unauthorized
+                                * peers for the special case of a link teardown
+                                * after a TDLS sta is removed due to being
+                                * unreachable.
+                                */
+                               if (tdls_peer && !tdls_auth &&
+                                   !ieee80211_is_tdls_setup(skb))
+                                       return -EINVAL;
+                       }
+
+               }
+
+               sta = sta_info_get(sdata, sdata->u.mgd.bssid);
+               if (!sta)
+                       return -ENOLINK;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       *sta_out = sta ?: ERR_PTR(-ENOENT);
+       return 0;
 }
 
 /**
@@ -1824,7 +1894,8 @@ static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local,
  * Returns: the (possibly reallocated) skb or an ERR_PTR() code
  */
 static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
-                                          struct sk_buff *skb, u32 info_flags)
+                                          struct sk_buff *skb, u32 info_flags,
+                                          struct sta_info *sta)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_tx_info *info;
@@ -1837,9 +1908,8 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
        const u8 *encaps_data;
        int encaps_len, skip_header_bytes;
        int nh_pos, h_pos;
-       struct sta_info *sta = NULL;
-       bool wme_sta = false, authorized = false, tdls_auth = false;
-       bool tdls_peer = false, tdls_setup_frame = false;
+       bool wme_sta = false, authorized = false;
+       bool tdls_peer;
        bool multicast;
        u16 info_id = 0;
        struct ieee80211_chanctx_conf *chanctx_conf;
@@ -1847,6 +1917,9 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
        enum ieee80211_band band;
        int ret;
 
+       if (IS_ERR(sta))
+               sta = NULL;
+
        /* convert Ethernet header to proper 802.11 header (based on
         * operation mode) */
        ethertype = (skb->data[12] << 8) | skb->data[13];
@@ -1854,8 +1927,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP_VLAN:
-               sta = rcu_dereference(sdata->u.vlan.sta);
-               if (sta) {
+               if (sdata->wdev.use_4addr) {
                        fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
                        /* RA TA DA SA */
                        memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
@@ -1874,7 +1946,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
                        goto free;
                }
                band = chanctx_conf->def.chan->band;
-               if (sta)
+               if (sdata->wdev.use_4addr)
                        break;
                /* fall through */
        case NL80211_IFTYPE_AP:
@@ -1978,38 +2050,10 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
                break;
 #endif
        case NL80211_IFTYPE_STATION:
-               if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
-                       sta = sta_info_get(sdata, skb->data);
-                       if (sta) {
-                               authorized = test_sta_flag(sta,
-                                                       WLAN_STA_AUTHORIZED);
-                               wme_sta = sta->sta.wme;
-                               tdls_peer = test_sta_flag(sta,
-                                                         WLAN_STA_TDLS_PEER);
-                               tdls_auth = test_sta_flag(sta,
-                                               WLAN_STA_TDLS_PEER_AUTH);
-                       }
-
-                       if (tdls_peer)
-                               tdls_setup_frame =
-                                       ethertype == ETH_P_TDLS &&
-                                       skb->len > 14 &&
-                                       skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
-               }
+               /* we already did checks when looking up the RA STA */
+               tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER);
 
-               /*
-                * TDLS link during setup - throw out frames to peer. We allow
-                * TDLS-setup frames to unauthorized peers for the special case
-                * of a link teardown after a TDLS sta is removed due to being
-                * unreachable.
-                */
-               if (tdls_peer && !tdls_auth && !tdls_setup_frame) {
-                       ret = -EINVAL;
-                       goto free;
-               }
-
-               /* send direct packets to authorized TDLS peers */
-               if (tdls_peer && tdls_auth) {
+               if (tdls_peer) {
                        /* DA SA BSSID */
                        memcpy(hdr.addr1, skb->data, ETH_ALEN);
                        memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
@@ -2071,26 +2115,19 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
                goto free;
        }
 
-       /*
-        * There's no need to try to look up the destination
-        * if it is a multicast address (which can only happen
-        * in AP mode)
-        */
        multicast = is_multicast_ether_addr(hdr.addr1);
-       if (!multicast) {
-               sta = sta_info_get(sdata, hdr.addr1);
-               if (sta) {
-                       authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
-                       wme_sta = sta->sta.wme;
-               }
-       }
 
-       /* For mesh, the use of the QoS header is mandatory */
-       if (ieee80211_vif_is_mesh(&sdata->vif))
+       /* sta is always NULL for mesh */
+       if (sta) {
+               authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
+               wme_sta = sta->sta.wme;
+       } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
+               /* For mesh, the use of the QoS header is mandatory */
                wme_sta = true;
+       }
 
-       /* receiver and we are QoS enabled, use a QoS type frame */
-       if (wme_sta && local->hw.queues >= IEEE80211_NUM_ACS) {
+       /* receiver does QoS (which also means we do) use it */
+       if (wme_sta) {
                fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
                hdrlen += 2;
        }
@@ -2260,7 +2297,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                                  u32 info_flags)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-       struct ieee80211_local *local = sdata->local;
+       struct sta_info *sta;
 
        if (unlikely(skb->len < ETH_HLEN)) {
                kfree_skb(skb);
@@ -2269,10 +2306,12 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 
        rcu_read_lock();
 
-       /* Measure frame arrival for Tx latency statistics calculation */
-       ieee80211_tx_latency_start_msrmnt(local, skb);
+       if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
+               kfree_skb(skb);
+               goto out;
+       }
 
-       skb = ieee80211_build_hdr(sdata, skb, info_flags);
+       skb = ieee80211_build_hdr(sdata, skb, info_flags, sta);
        if (IS_ERR(skb))
                goto out;
 
@@ -2280,7 +2319,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
        dev->stats.tx_bytes += skb->len;
        dev->trans_start = jiffies;
 
-       ieee80211_xmit(sdata, skb);
+       ieee80211_xmit(sdata, sta, skb);
  out:
        rcu_read_unlock();
 }
@@ -2308,10 +2347,17 @@ ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
                .local = sdata->local,
                .sdata = sdata,
        };
+       struct sta_info *sta;
 
        rcu_read_lock();
 
-       skb = ieee80211_build_hdr(sdata, skb, info_flags);
+       if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
+               kfree_skb(skb);
+               skb = ERR_PTR(-EINVAL);
+               goto out;
+       }
+
+       skb = ieee80211_build_hdr(sdata, skb, info_flags, sta);
        if (IS_ERR(skb))
                goto out;
 
@@ -2369,7 +2415,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
                        return true;
                }
                info->band = chanctx_conf->def.chan->band;
-               result = ieee80211_tx(sdata, skb, true);
+               result = ieee80211_tx(sdata, NULL, skb, true);
        } else {
                struct sk_buff_head skbs;
 
@@ -3107,7 +3153,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
 
                if (sdata->vif.type == NL80211_IFTYPE_AP)
                        sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
-               if (!ieee80211_tx_prepare(sdata, &tx, skb))
+               if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
                        break;
                dev_kfree_skb_any(skb);
        }
@@ -3239,6 +3285,6 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
         */
        local_bh_disable();
        IEEE80211_SKB_CB(skb)->band = band;
-       ieee80211_xmit(sdata, skb);
+       ieee80211_xmit(sdata, NULL, skb);
        local_bh_enable();
 }
index 747bdcf72e92788574ec3e2e635b5e6b96cb75f0..d1742a7d9ea497f248c4987d712507dd4d851ced 100644 (file)
@@ -625,13 +625,14 @@ void ieee80211_wake_vif_queues(struct ieee80211_local *local,
                                        reason, true);
 }
 
-static void __iterate_active_interfaces(struct ieee80211_local *local,
-                                       u32 iter_flags,
-                                       void (*iterator)(void *data, u8 *mac,
-                                               struct ieee80211_vif *vif),
-                                       void *data)
+static void __iterate_interfaces(struct ieee80211_local *local,
+                                u32 iter_flags,
+                                void (*iterator)(void *data, u8 *mac,
+                                                 struct ieee80211_vif *vif),
+                                void *data)
 {
        struct ieee80211_sub_if_data *sdata;
+       bool active_only = iter_flags & IEEE80211_IFACE_ITER_ACTIVE;
 
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                switch (sdata->vif.type) {
@@ -645,9 +646,9 @@ static void __iterate_active_interfaces(struct ieee80211_local *local,
                        break;
                }
                if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) &&
-                   !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
+                   active_only && !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
                        continue;
-               if (ieee80211_sdata_running(sdata))
+               if (ieee80211_sdata_running(sdata) || !active_only)
                        iterator(data, sdata->vif.addr,
                                 &sdata->vif);
        }
@@ -656,12 +657,12 @@ static void __iterate_active_interfaces(struct ieee80211_local *local,
                                      lockdep_is_held(&local->iflist_mtx) ||
                                      lockdep_rtnl_is_held());
        if (sdata &&
-           (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
+           (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only ||
             sdata->flags & IEEE80211_SDATA_IN_DRIVER))
                iterator(data, sdata->vif.addr, &sdata->vif);
 }
 
-void ieee80211_iterate_active_interfaces(
+void ieee80211_iterate_interfaces(
        struct ieee80211_hw *hw, u32 iter_flags,
        void (*iterator)(void *data, u8 *mac,
                         struct ieee80211_vif *vif),
@@ -670,10 +671,10 @@ void ieee80211_iterate_active_interfaces(
        struct ieee80211_local *local = hw_to_local(hw);
 
        mutex_lock(&local->iflist_mtx);
-       __iterate_active_interfaces(local, iter_flags, iterator, data);
+       __iterate_interfaces(local, iter_flags, iterator, data);
        mutex_unlock(&local->iflist_mtx);
 }
-EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
+EXPORT_SYMBOL_GPL(ieee80211_iterate_interfaces);
 
 void ieee80211_iterate_active_interfaces_atomic(
        struct ieee80211_hw *hw, u32 iter_flags,
@@ -684,7 +685,8 @@ void ieee80211_iterate_active_interfaces_atomic(
        struct ieee80211_local *local = hw_to_local(hw);
 
        rcu_read_lock();
-       __iterate_active_interfaces(local, iter_flags, iterator, data);
+       __iterate_interfaces(local, iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
+                            iterator, data);
        rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
@@ -699,7 +701,8 @@ void ieee80211_iterate_active_interfaces_rtnl(
 
        ASSERT_RTNL();
 
-       __iterate_active_interfaces(local, iter_flags, iterator, data);
+       __iterate_interfaces(local, iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
+                            iterator, data);
 }
 EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_rtnl);
 
@@ -742,6 +745,18 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev)
 }
 EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif);
 
+struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif)
+{
+       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+       if (!ieee80211_sdata_running(sdata) ||
+           !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
+               return NULL;
+
+       return &sdata->wdev;
+}
+EXPORT_SYMBOL_GPL(ieee80211_vif_to_wdev);
+
 /*
  * Nothing should have been stuffed into the workqueue during
  * the suspend->resume cycle. Since we can't check each caller
@@ -1811,8 +1826,25 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
                    sdata->vif.type != NL80211_IFTYPE_MONITOR &&
-                   ieee80211_sdata_running(sdata))
+                   ieee80211_sdata_running(sdata)) {
                        res = drv_add_interface(local, sdata);
+                       if (WARN_ON(res))
+                               break;
+               }
+       }
+
+       /* If adding any of the interfaces failed above, roll back and
+        * report failure.
+        */
+       if (res) {
+               list_for_each_entry_continue_reverse(sdata, &local->interfaces,
+                                                    list)
+                       if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+                           sdata->vif.type != NL80211_IFTYPE_MONITOR &&
+                           ieee80211_sdata_running(sdata))
+                               drv_remove_interface(local, sdata);
+               ieee80211_handle_reconfig_failure(local);
+               return res;
        }
 
        /* add channel contexts */
@@ -2344,6 +2376,41 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
        return pos + sizeof(struct ieee80211_ht_operation);
 }
 
+u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
+                               const struct cfg80211_chan_def *chandef)
+{
+       struct ieee80211_vht_operation *vht_oper;
+
+       *pos++ = WLAN_EID_VHT_OPERATION;
+       *pos++ = sizeof(struct ieee80211_vht_operation);
+       vht_oper = (struct ieee80211_vht_operation *)pos;
+       vht_oper->center_freq_seg1_idx = ieee80211_frequency_to_channel(
+                                                       chandef->center_freq1);
+       if (chandef->center_freq2)
+               vht_oper->center_freq_seg2_idx =
+                       ieee80211_frequency_to_channel(chandef->center_freq2);
+
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_160:
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
+               break;
+       case NL80211_CHAN_WIDTH_80P80:
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+               break;
+       case NL80211_CHAN_WIDTH_80:
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+               break;
+       default:
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
+               break;
+       }
+
+       /* don't require special VHT peer rates */
+       vht_oper->basic_mcs_set = cpu_to_le16(0xffff);
+
+       return pos + sizeof(struct ieee80211_vht_operation);
+}
+
 void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
                                  const struct ieee80211_ht_operation *ht_oper,
                                  struct cfg80211_chan_def *chandef)
@@ -2373,6 +2440,39 @@ void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
        cfg80211_chandef_create(chandef, control_chan, channel_type);
 }
 
+void ieee80211_vht_oper_to_chandef(struct ieee80211_channel *control_chan,
+                                  const struct ieee80211_vht_operation *oper,
+                                  struct cfg80211_chan_def *chandef)
+{
+       if (!oper)
+               return;
+
+       chandef->chan = control_chan;
+
+       switch (oper->chan_width) {
+       case IEEE80211_VHT_CHANWIDTH_USE_HT:
+               break;
+       case IEEE80211_VHT_CHANWIDTH_80MHZ:
+               chandef->width = NL80211_CHAN_WIDTH_80;
+               break;
+       case IEEE80211_VHT_CHANWIDTH_160MHZ:
+               chandef->width = NL80211_CHAN_WIDTH_160;
+               break;
+       case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+               chandef->width = NL80211_CHAN_WIDTH_80P80;
+               break;
+       default:
+               break;
+       }
+
+       chandef->center_freq1 =
+               ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
+                                              control_chan->band);
+       chandef->center_freq2 =
+               ieee80211_channel_to_frequency(oper->center_freq_seg2_idx,
+                                              control_chan->band);
+}
+
 int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
                             const struct ieee80211_supported_band *sband,
                             const u8 *srates, int srates_len, u32 *rates)
index 85f9596da07b8916908c2ccfc3353c5f1676a140..80694d55db7404079212761ff23083bb9aa35169 100644 (file)
@@ -129,10 +129,6 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        if (!vht_cap_ie || !sband->vht_cap.vht_supported)
                return;
 
-       /* don't support VHT for TDLS peers for now */
-       if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
-               return;
-
        /*
         * A VHT STA must support 40 MHz, but if we verify that here
         * then we break a few things - some APs (e.g. Netgear R6300v2
index 75de6fac40d1533fbb9836ab138d71e838ffb6af..9d63d93c836ebbd6d8396964556e1e0a054e4578 100644 (file)
@@ -780,9 +780,8 @@ ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx,
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_key *key = tx->key;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       const struct ieee80211_cipher_scheme *cs = key->sta->cipher_scheme;
        int hdrlen;
-       u8 *pos;
+       u8 *pos, iv_len = key->conf.iv_len;
 
        if (info->control.hw_key &&
            !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
@@ -790,14 +789,14 @@ ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx,
                return TX_CONTINUE;
        }
 
-       if (unlikely(skb_headroom(skb) < cs->hdr_len &&
-                    pskb_expand_head(skb, cs->hdr_len, 0, GFP_ATOMIC)))
+       if (unlikely(skb_headroom(skb) < iv_len &&
+                    pskb_expand_head(skb, iv_len, 0, GFP_ATOMIC)))
                return TX_DROP;
 
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
-       pos = skb_push(skb, cs->hdr_len);
-       memmove(pos, pos + cs->hdr_len, hdrlen);
+       pos = skb_push(skb, iv_len);
+       memmove(pos, pos + iv_len, hdrlen);
 
        return TX_CONTINUE;
 }
@@ -1217,7 +1216,7 @@ ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx)
                if (!info->control.hw_key)
                        return TX_DROP;
 
-               if (tx->key->sta->cipher_scheme) {
+               if (tx->key->flags & KEY_FLAG_CIPHER_SCHEME) {
                        res = ieee80211_crypto_cs_encrypt(tx, skb);
                        if (res != TX_CONTINUE)
                                return res;
index 98180a9fff4adc565f7ac9fe7fe711ef549396da..a0533357b9eaca22df9e7b105f17ce3440b03d4e 100644 (file)
@@ -1,4 +1,4 @@
-#ifndef __MAC802154_DRVIER_OPS
+#ifndef __MAC802154_DRIVER_OPS
 #define __MAC802154_DRIVER_OPS
 
 #include <linux/types.h>
@@ -220,4 +220,4 @@ drv_set_promiscuous_mode(struct ieee802154_local *local, bool on)
        return local->ops->set_promiscuous_mode(&local->hw, on);
 }
 
-#endif /* __MAC802154_DRVIER_OPS */
+#endif /* __MAC802154_DRIVER_OPS */
index 6fb6bdf9868c7684b63635a23c2a8b45ae07e4d8..38b56f9d9386a4821e50cfdd6059fa115c5e4358 100644 (file)
@@ -174,24 +174,16 @@ ieee802154_check_mac_settings(struct ieee802154_local *local,
        }
 
        if (local->hw.flags & IEEE802154_HW_AFILT) {
-               if (wpan_dev->pan_id != nwpan_dev->pan_id)
-                       return -EBUSY;
-
-               if (wpan_dev->short_addr != nwpan_dev->short_addr)
-                       return -EBUSY;
-
-               if (wpan_dev->extended_addr != nwpan_dev->extended_addr)
+               if (wpan_dev->pan_id != nwpan_dev->pan_id ||
+                   wpan_dev->short_addr != nwpan_dev->short_addr ||
+                   wpan_dev->extended_addr != nwpan_dev->extended_addr)
                        return -EBUSY;
        }
 
        if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) {
-               if (wpan_dev->min_be != nwpan_dev->min_be)
-                       return -EBUSY;
-
-               if (wpan_dev->max_be != nwpan_dev->max_be)
-                       return -EBUSY;
-
-               if (wpan_dev->csma_retries != nwpan_dev->csma_retries)
+               if (wpan_dev->min_be != nwpan_dev->min_be ||
+                   wpan_dev->max_be != nwpan_dev->max_be ||
+                   wpan_dev->csma_retries != nwpan_dev->csma_retries)
                        return -EBUSY;
        }
 
index 5fc979027919749a604a47bcd45148fbe5505f03..150bf807e572eb85458371d1c8e930e2cb7ec0b2 100644 (file)
@@ -65,8 +65,19 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
 {
        if (ifs_handling) {
                struct ieee802154_local *local = hw_to_local(hw);
+               u8 max_sifs_size;
 
-               if (skb->len > 18)
+               /* If transceiver sets CRC on his own we need to use lifs
+                * threshold len above 16 otherwise 18, because it's not
+                * part of skb->len.
+                */
+               if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM)
+                       max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE -
+                                       IEEE802154_FCS_LEN;
+               else
+                       max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE;
+
+               if (skb->len > max_sifs_size)
                        hrtimer_start(&local->ifs_timer,
                                      ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC),
                                      HRTIMER_MODE_REL);
index 37421db8896524299ea4648c187f7a235de10d6c..17bde799c8548e46a791fabc8b9619c57f2b2f99 100644 (file)
@@ -1,9 +1,30 @@
 #
 # MPLS configuration
 #
+
+menuconfig MPLS
+       bool "MultiProtocol Label Switching"
+       default n
+       ---help---
+         MultiProtocol Label Switching routes packets through logical
+         circuits.  Originally conceived as a way of routing packets at
+         hardware speeds (before hardware was capable of routing ipv4 packets),
+         MPLS remains a simple way of making tunnels.
+
+         If you have not heard of MPLS you probably want to say N here.
+
+if MPLS
+
 config NET_MPLS_GSO
        tristate "MPLS: GSO support"
        help
         This is helper module to allow segmentation of non-MPLS GSO packets
         that have had MPLS stack entries pushed onto them and thus
         become MPLS GSO packets.
+
+config MPLS_ROUTING
+       tristate "MPLS: routing support"
+       help
+        Add support for forwarding of mpls packets.
+
+endif # MPLS
index 6dec088c2d0f77dff06bf6bbd8557d2f325bafa1..65bbe68c72e66ad44463ed465eaafe01e4c2bd10 100644 (file)
@@ -2,3 +2,6 @@
 # Makefile for MPLS.
 #
 obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o
+obj-$(CONFIG_MPLS_ROUTING) += mpls_router.o
+
+mpls_router-y := af_mpls.o
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
new file mode 100644 (file)
index 0000000..db8a2ea
--- /dev/null
@@ -0,0 +1,1023 @@
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/sysctl.h>
+#include <linux/net.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/ipv6.h>
+#include <linux/mpls.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/arp.h>
+#include <net/ip_fib.h>
+#include <net/netevent.h>
+#include <net/netns/generic.h>
+#include "internal.h"
+
+#define LABEL_NOT_SPECIFIED (1<<20)
+#define MAX_NEW_LABELS 2
+
+/* This maximum ha length copied from the definition of struct neighbour */
+#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
+
+struct mpls_route { /* next hop label forwarding entry */
+       struct net_device __rcu *rt_dev;
+       struct rcu_head         rt_rcu;
+       u32                     rt_label[MAX_NEW_LABELS];
+       u8                      rt_protocol; /* routing protocol that set this entry */
+       u8                      rt_labels;
+       u8                      rt_via_alen;
+       u8                      rt_via_table;
+       u8                      rt_via[0];
+};
+
+static int zero = 0;
+static int label_limit = (1 << 20) - 1;
+
+static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
+                      struct nlmsghdr *nlh, struct net *net, u32 portid,
+                      unsigned int nlm_flags);
+
+static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
+{
+       struct mpls_route *rt = NULL;
+
+       if (index < net->mpls.platform_labels) {
+               struct mpls_route __rcu **platform_label =
+                       rcu_dereference(net->mpls.platform_label);
+               rt = rcu_dereference(platform_label[index]);
+       }
+       return rt;
+}
+
+static bool mpls_output_possible(const struct net_device *dev)
+{
+       return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
+}
+
+static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
+{
+       /* The size of the layer 2.5 labels to be added for this route */
+       return rt->rt_labels * sizeof(struct mpls_shim_hdr);
+}
+
+static unsigned int mpls_dev_mtu(const struct net_device *dev)
+{
+       /* The amount of data the layer 2 frame can hold */
+       return dev->mtu;
+}
+
+static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+{
+       if (skb->len <= mtu)
+               return false;
+
+       if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+               return false;
+
+       return true;
+}
+
+static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
+                       struct mpls_entry_decoded dec)
+{
+       /* RFC4385 and RFC5586 encode other packets in mpls such that
+        * they don't conflict with the ip version number, making
+        * decoding by examining the ip version correct in everything
+        * except for the strangest cases.
+        *
+        * The strange cases if we choose to support them will require
+        * manual configuration.
+        */
+       struct iphdr *hdr4;
+       bool success = true;
+
+       /* The IPv4 code below accesses through the IPv4 header
+        * checksum, which is 12 bytes into the packet.
+        * The IPv6 code below accesses through the IPv6 hop limit
+        * which is 8 bytes into the packet.
+        *
+        * For all supported cases there should always be at least 12
+        * bytes of packet data present.  The IPv4 header is 20 bytes
+        * without options and the IPv6 header is always 40 bytes
+        * long.
+        */
+       if (!pskb_may_pull(skb, 12))
+               return false;
+
+       /* Use ip_hdr to find the ip protocol version */
+       hdr4 = ip_hdr(skb);
+       if (hdr4->version == 4) {
+               skb->protocol = htons(ETH_P_IP);
+               csum_replace2(&hdr4->check,
+                             htons(hdr4->ttl << 8),
+                             htons(dec.ttl << 8));
+               hdr4->ttl = dec.ttl;
+       }
+       else if (hdr4->version == 6) {
+               struct ipv6hdr *hdr6 = ipv6_hdr(skb);
+               skb->protocol = htons(ETH_P_IPV6);
+               hdr6->hop_limit = dec.ttl;
+       }
+       else
+               /* version 0 and version 1 are used by pseudo wires */
+               success = false;
+       return success;
+}
+
+static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
+                       struct packet_type *pt, struct net_device *orig_dev)
+{
+       struct net *net = dev_net(dev);
+       struct mpls_shim_hdr *hdr;
+       struct mpls_route *rt;
+       struct mpls_entry_decoded dec;
+       struct net_device *out_dev;
+       unsigned int hh_len;
+       unsigned int new_header_size;
+       unsigned int mtu;
+       int err;
+
+       /* Careful this entire function runs inside of an rcu critical section */
+
+       if (skb->pkt_type != PACKET_HOST)
+               goto drop;
+
+       if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+               goto drop;
+
+       if (!pskb_may_pull(skb, sizeof(*hdr)))
+               goto drop;
+
+       /* Read and decode the label */
+       hdr = mpls_hdr(skb);
+       dec = mpls_entry_decode(hdr);
+
+       /* Pop the label */
+       skb_pull(skb, sizeof(*hdr));
+       skb_reset_network_header(skb);
+
+       skb_orphan(skb);
+
+       rt = mpls_route_input_rcu(net, dec.label);
+       if (!rt)
+               goto drop;
+
+       /* Find the output device */
+       out_dev = rcu_dereference(rt->rt_dev);
+       if (!mpls_output_possible(out_dev))
+               goto drop;
+
+       if (skb_warn_if_lro(skb))
+               goto drop;
+
+       skb_forward_csum(skb);
+
+       /* Verify ttl is valid */
+       if (dec.ttl <= 1)
+               goto drop;
+       dec.ttl -= 1;
+
+       /* Verify the destination can hold the packet */
+       new_header_size = mpls_rt_header_size(rt);
+       mtu = mpls_dev_mtu(out_dev);
+       if (mpls_pkt_too_big(skb, mtu - new_header_size))
+               goto drop;
+
+       hh_len = LL_RESERVED_SPACE(out_dev);
+       if (!out_dev->header_ops)
+               hh_len = 0;
+
+       /* Ensure there is enough space for the headers in the skb */
+       if (skb_cow(skb, hh_len + new_header_size))
+               goto drop;
+
+       skb->dev = out_dev;
+       skb->protocol = htons(ETH_P_MPLS_UC);
+
+       if (unlikely(!new_header_size && dec.bos)) {
+               /* Penultimate hop popping */
+               if (!mpls_egress(rt, skb, dec))
+                       goto drop;
+       } else {
+               bool bos;
+               int i;
+               skb_push(skb, new_header_size);
+               skb_reset_network_header(skb);
+               /* Push the new labels */
+               hdr = mpls_hdr(skb);
+               bos = dec.bos;
+               for (i = rt->rt_labels - 1; i >= 0; i--) {
+                       hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos);
+                       bos = false;
+               }
+       }
+
+       err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb);
+       if (err)
+               net_dbg_ratelimited("%s: packet transmission failed: %d\n",
+                                   __func__, err);
+       return 0;
+
+drop:
+       kfree_skb(skb);
+       return NET_RX_DROP;
+}
+
+static struct packet_type mpls_packet_type __read_mostly = {
+       .type = cpu_to_be16(ETH_P_MPLS_UC),
+       .func = mpls_forward,
+};
+
+static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
+       [RTA_DST]               = { .type = NLA_U32 },
+       [RTA_OIF]               = { .type = NLA_U32 },
+};
+
+struct mpls_route_config {
+       u32             rc_protocol;
+       u32             rc_ifindex;
+       u16             rc_via_table;
+       u16             rc_via_alen;
+       u8              rc_via[MAX_VIA_ALEN];
+       u32             rc_label;
+       u32             rc_output_labels;
+       u32             rc_output_label[MAX_NEW_LABELS];
+       u32             rc_nlflags;
+       struct nl_info  rc_nlinfo;
+};
+
+static struct mpls_route *mpls_rt_alloc(size_t alen)
+{
+       struct mpls_route *rt;
+
+       rt = kzalloc(sizeof(*rt) + alen, GFP_KERNEL);
+       if (rt)
+               rt->rt_via_alen = alen;
+       return rt;
+}
+
+static void mpls_rt_free(struct mpls_route *rt)
+{
+       if (rt)
+               kfree_rcu(rt, rt_rcu);
+}
+
+static void mpls_notify_route(struct net *net, unsigned index,
+                             struct mpls_route *old, struct mpls_route *new,
+                             const struct nl_info *info)
+{
+       struct nlmsghdr *nlh = info ? info->nlh : NULL;
+       unsigned portid = info ? info->portid : 0;
+       int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
+       struct mpls_route *rt = new ? new : old;
+       unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
+       /* Ignore reserved labels for now */
+       if (rt && (index >= 16))
+               rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
+}
+
+static void mpls_route_update(struct net *net, unsigned index,
+                             struct net_device *dev, struct mpls_route *new,
+                             const struct nl_info *info)
+{
+       struct mpls_route __rcu **platform_label;
+       struct mpls_route *rt, *old = NULL;
+
+       ASSERT_RTNL();
+
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       rt = rtnl_dereference(platform_label[index]);
+       if (!dev || (rt && (rtnl_dereference(rt->rt_dev) == dev))) {
+               rcu_assign_pointer(platform_label[index], new);
+               old = rt;
+       }
+
+       mpls_notify_route(net, index, old, new, info);
+
+       /* If we removed a route free it now */
+       mpls_rt_free(old);
+}
+
+static unsigned find_free_label(struct net *net)
+{
+       struct mpls_route __rcu **platform_label;
+       size_t platform_labels;
+       unsigned index;
+
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       platform_labels = net->mpls.platform_labels;
+       for (index = 16; index < platform_labels; index++) {
+               if (!rtnl_dereference(platform_label[index]))
+                       return index;
+       }
+       return LABEL_NOT_SPECIFIED;
+}
+
+static int mpls_route_add(struct mpls_route_config *cfg)
+{
+       struct mpls_route __rcu **platform_label;
+       struct net *net = cfg->rc_nlinfo.nl_net;
+       struct net_device *dev = NULL;
+       struct mpls_route *rt, *old;
+       unsigned index;
+       int i;
+       int err = -EINVAL;
+
+       index = cfg->rc_label;
+
+       /* If a label was not specified during insert pick one */
+       if ((index == LABEL_NOT_SPECIFIED) &&
+           (cfg->rc_nlflags & NLM_F_CREATE)) {
+               index = find_free_label(net);
+       }
+
+       /* The first 16 labels are reserved, and may not be set */
+       if (index < 16)
+               goto errout;
+
+       /* The full 20 bit range may not be supported. */
+       if (index >= net->mpls.platform_labels)
+               goto errout;
+
+       /* Ensure only a supported number of labels are present */
+       if (cfg->rc_output_labels > MAX_NEW_LABELS)
+               goto errout;
+
+       err = -ENODEV;
+       dev = dev_get_by_index(net, cfg->rc_ifindex);
+       if (!dev)
+               goto errout;
+
+       /* For now just support ethernet devices */
+       err = -EINVAL;
+       if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
+               goto errout;
+
+       err = -EINVAL;
+       if ((cfg->rc_via_table == NEIGH_LINK_TABLE) &&
+           (dev->addr_len != cfg->rc_via_alen))
+               goto errout;
+
+       /* Append makes no sense with mpls */
+       err = -EOPNOTSUPP;
+       if (cfg->rc_nlflags & NLM_F_APPEND)
+               goto errout;
+
+       err = -EEXIST;
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       old = rtnl_dereference(platform_label[index]);
+       if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
+               goto errout;
+
+       err = -EEXIST;
+       if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
+               goto errout;
+
+       err = -ENOENT;
+       if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
+               goto errout;
+
+       err = -ENOMEM;
+       rt = mpls_rt_alloc(cfg->rc_via_alen);
+       if (!rt)
+               goto errout;
+
+       rt->rt_labels = cfg->rc_output_labels;
+       for (i = 0; i < rt->rt_labels; i++)
+               rt->rt_label[i] = cfg->rc_output_label[i];
+       rt->rt_protocol = cfg->rc_protocol;
+       RCU_INIT_POINTER(rt->rt_dev, dev);
+       rt->rt_via_table = cfg->rc_via_table;
+       memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
+
+       mpls_route_update(net, index, NULL, rt, &cfg->rc_nlinfo);
+
+       dev_put(dev);
+       return 0;
+
+errout:
+       if (dev)
+               dev_put(dev);
+       return err;
+}
+
+static int mpls_route_del(struct mpls_route_config *cfg)
+{
+       struct net *net = cfg->rc_nlinfo.nl_net;
+       unsigned index;
+       int err = -EINVAL;
+
+       index = cfg->rc_label;
+
+       /* The first 16 labels are reserved, and may not be removed */
+       if (index < 16)
+               goto errout;
+
+       /* The full 20 bit range may not be supported */
+       if (index >= net->mpls.platform_labels)
+               goto errout;
+
+       mpls_route_update(net, index, NULL, NULL, &cfg->rc_nlinfo);
+
+       err = 0;
+errout:
+       return err;
+}
+
+static void mpls_ifdown(struct net_device *dev)
+{
+       struct mpls_route __rcu **platform_label;
+       struct net *net = dev_net(dev);
+       unsigned index;
+
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       for (index = 0; index < net->mpls.platform_labels; index++) {
+               struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+               if (!rt)
+                       continue;
+               if (rtnl_dereference(rt->rt_dev) != dev)
+                       continue;
+               rt->rt_dev = NULL;
+       }
+}
+
+static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
+                          void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       switch(event) {
+       case NETDEV_UNREGISTER:
+               mpls_ifdown(dev);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block mpls_dev_notifier = {
+       .notifier_call = mpls_dev_notify,
+};
+
+static int nla_put_via(struct sk_buff *skb,
+                      u8 table, const void *addr, int alen)
+{
+       static const int table_to_family[NEIGH_NR_TABLES + 1] = {
+               AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
+       };
+       struct nlattr *nla;
+       struct rtvia *via;
+       int family = AF_UNSPEC;
+
+       nla = nla_reserve(skb, RTA_VIA, alen + 2);
+       if (!nla)
+               return -EMSGSIZE;
+
+       if (table <= NEIGH_NR_TABLES)
+               family = table_to_family[table];
+
+       via = nla_data(nla);
+       via->rtvia_family = family;
+       memcpy(via->rtvia_addr, addr, alen);
+       return 0;
+}
+
+int nla_put_labels(struct sk_buff *skb, int attrtype,
+                  u8 labels, const u32 label[])
+{
+       struct nlattr *nla;
+       struct mpls_shim_hdr *nla_label;
+       bool bos;
+       int i;
+       nla = nla_reserve(skb, attrtype, labels*4);
+       if (!nla)
+               return -EMSGSIZE;
+
+       nla_label = nla_data(nla);
+       bos = true;
+       for (i = labels - 1; i >= 0; i--) {
+               nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos);
+               bos = false;
+       }
+
+       return 0;
+}
+
+int nla_get_labels(const struct nlattr *nla,
+                  u32 max_labels, u32 *labels, u32 label[])
+{
+       unsigned len = nla_len(nla);
+       unsigned nla_labels;
+       struct mpls_shim_hdr *nla_label;
+       bool bos;
+       int i;
+
+       /* len needs to be an even multiple of 4 (the label size) */
+       if (len & 3)
+               return -EINVAL;
+
+       /* Limit the number of new labels allowed */
+       nla_labels = len/4;
+       if (nla_labels > max_labels)
+               return -EINVAL;
+
+       nla_label = nla_data(nla);
+       bos = true;
+       for (i = nla_labels - 1; i >= 0; i--, bos = false) {
+               struct mpls_entry_decoded dec;
+               dec = mpls_entry_decode(nla_label + i);
+
+               /* Ensure the bottom of stack flag is properly set
+                * and ttl and tc are both clear.
+                */
+               if ((dec.bos != bos) || dec.ttl || dec.tc)
+                       return -EINVAL;
+
+               label[i] = dec.label;
+       }
+       *labels = nla_labels;
+       return 0;
+}
+
+static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
+                              struct mpls_route_config *cfg)
+{
+       struct rtmsg *rtm;
+       struct nlattr *tb[RTA_MAX+1];
+       int index;
+       int err;
+
+       err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy);
+       if (err < 0)
+               goto errout;
+
+       err = -EINVAL;
+       rtm = nlmsg_data(nlh);
+       memset(cfg, 0, sizeof(*cfg));
+
+       if (rtm->rtm_family != AF_MPLS)
+               goto errout;
+       if (rtm->rtm_dst_len != 20)
+               goto errout;
+       if (rtm->rtm_src_len != 0)
+               goto errout;
+       if (rtm->rtm_tos != 0)
+               goto errout;
+       if (rtm->rtm_table != RT_TABLE_MAIN)
+               goto errout;
+       /* Any value is acceptable for rtm_protocol */
+
+       /* As mpls uses destination specific addresses
+        * (or source specific address in the case of multicast)
+        * all addresses have universal scope.
+        */
+       if (rtm->rtm_scope != RT_SCOPE_UNIVERSE)
+               goto errout;
+       if (rtm->rtm_type != RTN_UNICAST)
+               goto errout;
+       if (rtm->rtm_flags != 0)
+               goto errout;
+
+       cfg->rc_label           = LABEL_NOT_SPECIFIED;
+       cfg->rc_protocol        = rtm->rtm_protocol;
+       cfg->rc_nlflags         = nlh->nlmsg_flags;
+       cfg->rc_nlinfo.portid   = NETLINK_CB(skb).portid;
+       cfg->rc_nlinfo.nlh      = nlh;
+       cfg->rc_nlinfo.nl_net   = sock_net(skb->sk);
+
+       for (index = 0; index <= RTA_MAX; index++) {
+               struct nlattr *nla = tb[index];
+               if (!nla)
+                       continue;
+
+               switch(index) {
+               case RTA_OIF:
+                       cfg->rc_ifindex = nla_get_u32(nla);
+                       break;
+               case RTA_NEWDST:
+                       if (nla_get_labels(nla, MAX_NEW_LABELS,
+                                          &cfg->rc_output_labels,
+                                          cfg->rc_output_label))
+                               goto errout;
+                       break;
+               case RTA_DST:
+               {
+                       u32 label_count;
+                       if (nla_get_labels(nla, 1, &label_count,
+                                          &cfg->rc_label))
+                               goto errout;
+
+                       /* The first 16 labels are reserved, and may not be set */
+                       if (cfg->rc_label < 16)
+                               goto errout;
+
+                       break;
+               }
+               case RTA_VIA:
+               {
+                       struct rtvia *via = nla_data(nla);
+                       if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
+                               goto errout;
+                       cfg->rc_via_alen   = nla_len(nla) -
+                               offsetof(struct rtvia, rtvia_addr);
+                       if (cfg->rc_via_alen > MAX_VIA_ALEN)
+                               goto errout;
+
+                       /* Validate the address family */
+                       switch(via->rtvia_family) {
+                       case AF_PACKET:
+                               cfg->rc_via_table = NEIGH_LINK_TABLE;
+                               break;
+                       case AF_INET:
+                               cfg->rc_via_table = NEIGH_ARP_TABLE;
+                               if (cfg->rc_via_alen != 4)
+                                       goto errout;
+                               break;
+                       case AF_INET6:
+                               cfg->rc_via_table = NEIGH_ND_TABLE;
+                               if (cfg->rc_via_alen != 16)
+                                       goto errout;
+                               break;
+                       default:
+                               /* Unsupported address family */
+                               goto errout;
+                       }
+
+                       memcpy(cfg->rc_via, via->rtvia_addr, cfg->rc_via_alen);
+                       break;
+               }
+               default:
+                       /* Unsupported attribute */
+                       goto errout;
+               }
+       }
+
+       err = 0;
+errout:
+       return err;
+}
+
+static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       struct mpls_route_config cfg;
+       int err;
+
+       err = rtm_to_route_config(skb, nlh, &cfg);
+       if (err < 0)
+               return err;
+
+       return mpls_route_del(&cfg);
+}
+
+
+static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       struct mpls_route_config cfg;
+       int err;
+
+       err = rtm_to_route_config(skb, nlh, &cfg);
+       if (err < 0)
+               return err;
+
+       return mpls_route_add(&cfg);
+}
+
+static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
+                          u32 label, struct mpls_route *rt, int flags)
+{
+       struct net_device *dev;
+       struct nlmsghdr *nlh;
+       struct rtmsg *rtm;
+
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
+       if (nlh == NULL)
+               return -EMSGSIZE;
+
+       rtm = nlmsg_data(nlh);
+       rtm->rtm_family = AF_MPLS;
+       rtm->rtm_dst_len = 20;
+       rtm->rtm_src_len = 0;
+       rtm->rtm_tos = 0;
+       rtm->rtm_table = RT_TABLE_MAIN;
+       rtm->rtm_protocol = rt->rt_protocol;
+       rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+       rtm->rtm_type = RTN_UNICAST;
+       rtm->rtm_flags = 0;
+
+       if (rt->rt_labels &&
+           nla_put_labels(skb, RTA_NEWDST, rt->rt_labels, rt->rt_label))
+               goto nla_put_failure;
+       if (nla_put_via(skb, rt->rt_via_table, rt->rt_via, rt->rt_via_alen))
+               goto nla_put_failure;
+       dev = rtnl_dereference(rt->rt_dev);
+       if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
+               goto nla_put_failure;
+       if (nla_put_labels(skb, RTA_DST, 1, &label))
+               goto nla_put_failure;
+
+       nlmsg_end(skb, nlh);
+       return 0;
+
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct mpls_route __rcu **platform_label;
+       size_t platform_labels;
+       unsigned int index;
+
+       ASSERT_RTNL();
+
+       index = cb->args[0];
+       if (index < 16)
+               index = 16;
+
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       platform_labels = net->mpls.platform_labels;
+       for (; index < platform_labels; index++) {
+               struct mpls_route *rt;
+               rt = rtnl_dereference(platform_label[index]);
+               if (!rt)
+                       continue;
+
+               if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
+                                   cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+                                   index, rt, NLM_F_MULTI) < 0)
+                       break;
+       }
+       cb->args[0] = index;
+
+       return skb->len;
+}
+
+static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
+{
+       size_t payload =
+               NLMSG_ALIGN(sizeof(struct rtmsg))
+               + nla_total_size(2 + rt->rt_via_alen)   /* RTA_VIA */
+               + nla_total_size(4);                    /* RTA_DST */
+       if (rt->rt_labels)                              /* RTA_NEWDST */
+               payload += nla_total_size(rt->rt_labels * 4);
+       if (rt->rt_dev)                                 /* RTA_OIF */
+               payload += nla_total_size(4);
+       return payload;
+}
+
+static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
+                      struct nlmsghdr *nlh, struct net *net, u32 portid,
+                      unsigned int nlm_flags)
+{
+       struct sk_buff *skb;
+       u32 seq = nlh ? nlh->nlmsg_seq : 0;
+       int err = -ENOBUFS;
+
+       skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
+       if (skb == NULL)
+               goto errout;
+
+       err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
+       if (err < 0) {
+               /* -EMSGSIZE implies BUG in lfib_nlmsg_size */
+               WARN_ON(err == -EMSGSIZE);
+               kfree_skb(skb);
+               goto errout;
+       }
+       rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);
+
+       return;
+errout:
+       if (err < 0)
+               rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
+}
+
+static int resize_platform_label_table(struct net *net, size_t limit)
+{
+       size_t size = sizeof(struct mpls_route *) * limit;
+       size_t old_limit;
+       size_t cp_size;
+       struct mpls_route __rcu **labels = NULL, **old;
+       struct mpls_route *rt0 = NULL, *rt2 = NULL;
+       unsigned index;
+
+       if (size) {
+               labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+               if (!labels)
+                       labels = vzalloc(size);
+
+               if (!labels)
+                       goto nolabels;
+       }
+
+       /* In case the predefined labels need to be populated */
+       if (limit > LABEL_IPV4_EXPLICIT_NULL) {
+               struct net_device *lo = net->loopback_dev;
+               rt0 = mpls_rt_alloc(lo->addr_len);
+               if (!rt0)
+                       goto nort0;
+               RCU_INIT_POINTER(rt0->rt_dev, lo);
+               rt0->rt_protocol = RTPROT_KERNEL;
+               rt0->rt_via_table = NEIGH_LINK_TABLE;
+               memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
+       }
+       if (limit > LABEL_IPV6_EXPLICIT_NULL) {
+               struct net_device *lo = net->loopback_dev;
+               rt2 = mpls_rt_alloc(lo->addr_len);
+               if (!rt2)
+                       goto nort2;
+               RCU_INIT_POINTER(rt2->rt_dev, lo);
+               rt2->rt_protocol = RTPROT_KERNEL;
+               rt2->rt_via_table = NEIGH_LINK_TABLE;
+               memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
+       }
+
+       rtnl_lock();
+       /* Remember the original table */
+       old = rtnl_dereference(net->mpls.platform_label);
+       old_limit = net->mpls.platform_labels;
+
+       /* Free any labels beyond the new table */
+       for (index = limit; index < old_limit; index++)
+               mpls_route_update(net, index, NULL, NULL, NULL);
+
+       /* Copy over the old labels */
+       cp_size = size;
+       if (old_limit < limit)
+               cp_size = old_limit * sizeof(struct mpls_route *);
+
+       memcpy(labels, old, cp_size);
+
+       /* If needed set the predefined labels */
+       if ((old_limit <= LABEL_IPV6_EXPLICIT_NULL) &&
+           (limit > LABEL_IPV6_EXPLICIT_NULL)) {
+               RCU_INIT_POINTER(labels[LABEL_IPV6_EXPLICIT_NULL], rt2);
+               rt2 = NULL;
+       }
+
+       if ((old_limit <= LABEL_IPV4_EXPLICIT_NULL) &&
+           (limit > LABEL_IPV4_EXPLICIT_NULL)) {
+               RCU_INIT_POINTER(labels[LABEL_IPV4_EXPLICIT_NULL], rt0);
+               rt0 = NULL;
+       }
+
+       /* Update the global pointers */
+       net->mpls.platform_labels = limit;
+       rcu_assign_pointer(net->mpls.platform_label, labels);
+
+       rtnl_unlock();
+
+       mpls_rt_free(rt2);
+       mpls_rt_free(rt0);
+
+       if (old) {
+               synchronize_rcu();
+               kvfree(old);
+       }
+       return 0;
+
+nort2:
+       mpls_rt_free(rt0);
+nort0:
+       kvfree(labels);
+nolabels:
+       return -ENOMEM;
+}
+
+static int mpls_platform_labels(struct ctl_table *table, int write,
+                               void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct net *net = table->data;
+       int platform_labels = net->mpls.platform_labels;
+       int ret;
+       struct ctl_table tmp = {
+               .procname       = table->procname,
+               .data           = &platform_labels,
+               .maxlen         = sizeof(int),
+               .mode           = table->mode,
+               .extra1         = &zero,
+               .extra2         = &label_limit,
+       };
+
+       ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+
+       if (write && ret == 0)
+               ret = resize_platform_label_table(net, platform_labels);
+
+       return ret;
+}
+
+static struct ctl_table mpls_table[] = {
+       {
+               .procname       = "platform_labels",
+               .data           = NULL,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = mpls_platform_labels,
+       },
+       { }
+};
+
+static int mpls_net_init(struct net *net)
+{
+       struct ctl_table *table;
+
+       net->mpls.platform_labels = 0;
+       net->mpls.platform_label = NULL;
+
+       table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
+       if (table == NULL)
+               return -ENOMEM;
+
+       table[0].data = net;
+       net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
+       if (net->mpls.ctl == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void mpls_net_exit(struct net *net)
+{
+       struct mpls_route __rcu **platform_label;
+       size_t platform_labels;
+       struct ctl_table *table;
+       unsigned int index;
+
+       table = net->mpls.ctl->ctl_table_arg;
+       unregister_net_sysctl_table(net->mpls.ctl);
+       kfree(table);
+
+       /* An rcu grace period has passed since there was a device in
+        * the network namespace (and thus the last in flight packet)
+        * left this network namespace.  This is because
+        * unregister_netdevice_many and netdev_run_todo has completed
+        * for each network device that was in this network namespace.
+        *
+        * As such no additional rcu synchronization is necessary when
+        * freeing the platform_label table.
+        */
+       rtnl_lock();
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       platform_labels = net->mpls.platform_labels;
+       for (index = 0; index < platform_labels; index++) {
+               struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+               RCU_INIT_POINTER(platform_label[index], NULL);
+               mpls_rt_free(rt);
+       }
+       rtnl_unlock();
+
+       kvfree(platform_label);
+}
+
+static struct pernet_operations mpls_net_ops = {
+       .init = mpls_net_init,
+       .exit = mpls_net_exit,
+};
+
+static int __init mpls_init(void)
+{
+       int err;
+
+       BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);
+
+       err = register_pernet_subsys(&mpls_net_ops);
+       if (err)
+               goto out;
+
+       err = register_netdevice_notifier(&mpls_dev_notifier);
+       if (err)
+               goto out_unregister_pernet;
+
+       dev_add_pack(&mpls_packet_type);
+
+       rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL);
+       rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL);
+       rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL);
+       err = 0;
+out:
+       return err;
+
+out_unregister_pernet:
+       unregister_pernet_subsys(&mpls_net_ops);
+       goto out;
+}
+module_init(mpls_init);
+
+static void __exit mpls_exit(void)
+{
+       rtnl_unregister_all(PF_MPLS);
+       dev_remove_pack(&mpls_packet_type);
+       unregister_netdevice_notifier(&mpls_dev_notifier);
+       unregister_pernet_subsys(&mpls_net_ops);
+}
+module_exit(mpls_exit);
+
+MODULE_DESCRIPTION("MultiProtocol Label Switching");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_NETPROTO(PF_MPLS);
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
new file mode 100644 (file)
index 0000000..fb6de92
--- /dev/null
@@ -0,0 +1,59 @@
+#ifndef MPLS_INTERNAL_H
+#define MPLS_INTERNAL_H
+
+#define LABEL_IPV4_EXPLICIT_NULL       0 /* RFC3032 */
+#define LABEL_ROUTER_ALERT_LABEL       1 /* RFC3032 */
+#define LABEL_IPV6_EXPLICIT_NULL       2 /* RFC3032 */
+#define LABEL_IMPLICIT_NULL            3 /* RFC3032 */
+#define LABEL_ENTROPY_INDICATOR                7 /* RFC6790 */
+#define LABEL_GAL                      13 /* RFC5586 */
+#define LABEL_OAM_ALERT                        14 /* RFC3429 */
+#define LABEL_EXTENSION                        15 /* RFC7274 */
+
+
+struct mpls_shim_hdr {
+       __be32 label_stack_entry;
+};
+
+struct mpls_entry_decoded {
+       u32 label;
+       u8 ttl;
+       u8 tc;
+       u8 bos;
+};
+
+struct sk_buff;
+
+static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
+{
+       return (struct mpls_shim_hdr *)skb_network_header(skb);
+}
+
+static inline struct mpls_shim_hdr mpls_entry_encode(u32 label, unsigned ttl, unsigned tc, bool bos)
+{
+       struct mpls_shim_hdr result;
+       result.label_stack_entry =
+               cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
+                           (tc << MPLS_LS_TC_SHIFT) |
+                           (bos ? (1 << MPLS_LS_S_SHIFT) : 0) |
+                           (ttl << MPLS_LS_TTL_SHIFT));
+       return result;
+}
+
+static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *hdr)
+{
+       struct mpls_entry_decoded result;
+       unsigned entry = be32_to_cpu(hdr->label_stack_entry);
+
+       result.label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
+       result.ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
+       result.tc =  (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
+       result.bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;
+
+       return result;
+}
+
+int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels, const u32 label[]);
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels, u32 label[]);
+
+#endif /* MPLS_INTERNAL_H */
index b02660fa9eb00cd28aeb01f10b76f6604eae8096..f70e34a68f702ab39c43e27d4b8e8127b49525f6 100644 (file)
@@ -438,8 +438,10 @@ config NF_TABLES
 
          To compile it as a module, choose M here.
 
+if NF_TABLES
+
 config NF_TABLES_INET
-       depends on NF_TABLES && IPV6
+       depends on IPV6
        select NF_TABLES_IPV4
        select NF_TABLES_IPV6
        tristate "Netfilter nf_tables mixed IPv4/IPv6 tables support"
@@ -447,21 +449,18 @@ config NF_TABLES_INET
          This option enables support for a mixed IPv4/IPv6 "inet" table.
 
 config NFT_EXTHDR
-       depends on NF_TABLES
        tristate "Netfilter nf_tables IPv6 exthdr module"
        help
          This option adds the "exthdr" expression that you can use to match
          IPv6 extension headers.
 
 config NFT_META
-       depends on NF_TABLES
        tristate "Netfilter nf_tables meta module"
        help
          This option adds the "meta" expression that you can use to match and
          to set packet metainformation such as the packet mark.
 
 config NFT_CT
-       depends on NF_TABLES
        depends on NF_CONNTRACK
        tristate "Netfilter nf_tables conntrack module"
        help
@@ -469,42 +468,36 @@ config NFT_CT
          connection tracking information such as the flow state.
 
 config NFT_RBTREE
-       depends on NF_TABLES
        tristate "Netfilter nf_tables rbtree set module"
        help
          This option adds the "rbtree" set type (Red Black tree) that is used
          to build interval-based sets.
 
 config NFT_HASH
-       depends on NF_TABLES
        tristate "Netfilter nf_tables hash set module"
        help
          This option adds the "hash" set type that is used to build one-way
          mappings between matchings and actions.
 
 config NFT_COUNTER
-       depends on NF_TABLES
        tristate "Netfilter nf_tables counter module"
        help
          This option adds the "counter" expression that you can use to
          include packet and byte counters in a rule.
 
 config NFT_LOG
-       depends on NF_TABLES
        tristate "Netfilter nf_tables log module"
        help
          This option adds the "log" expression that you can use to log
          packets matching some criteria.
 
 config NFT_LIMIT
-       depends on NF_TABLES
        tristate "Netfilter nf_tables limit module"
        help
          This option adds the "limit" expression that you can use to
          ratelimit rule matchings.
 
 config NFT_MASQ
-       depends on NF_TABLES
        depends on NF_CONNTRACK
        depends on NF_NAT
        tristate "Netfilter nf_tables masquerade support"
@@ -513,7 +506,6 @@ config NFT_MASQ
          to perform NAT in the masquerade flavour.
 
 config NFT_REDIR
-       depends on NF_TABLES
        depends on NF_CONNTRACK
        depends on NF_NAT
        tristate "Netfilter nf_tables redirect support"
@@ -522,7 +514,6 @@ config NFT_REDIR
          to perform NAT in the redirect flavour.
 
 config NFT_NAT
-       depends on NF_TABLES
        depends on NF_CONNTRACK
        select NF_NAT
        tristate "Netfilter nf_tables nat module"
@@ -531,8 +522,6 @@ config NFT_NAT
          typical Network Address Translation (NAT) packet transformations.
 
 config NFT_QUEUE
-       depends on NF_TABLES
-       depends on NETFILTER_XTABLES
        depends on NETFILTER_NETLINK_QUEUE
        tristate "Netfilter nf_tables queue module"
        help
@@ -540,7 +529,6 @@ config NFT_QUEUE
          infrastructure (also known as NFQUEUE) from nftables.
 
 config NFT_REJECT
-       depends on NF_TABLES
        default m if NETFILTER_ADVANCED=n
        tristate "Netfilter nf_tables reject support"
        help
@@ -554,7 +542,6 @@ config NFT_REJECT_INET
        tristate
 
 config NFT_COMPAT
-       depends on NF_TABLES
        depends on NETFILTER_XTABLES
        tristate "Netfilter x_tables over nf_tables module"
        help
@@ -562,6 +549,8 @@ config NFT_COMPAT
          x_tables match/target extensions over the nf_tables
          framework.
 
+endif # NF_TABLES
+
 config NETFILTER_XTABLES
        tristate "Netfilter Xtables support (required for ip_tables)"
        default m if NETFILTER_ADVANCED=n
@@ -951,7 +940,7 @@ comment "Xtables matches"
 
 config NETFILTER_XT_MATCH_ADDRTYPE
        tristate '"addrtype" address type match support'
-       depends on NETFILTER_ADVANCED
+       default m if NETFILTER_ADVANCED=n
        ---help---
          This option allows you to match what routing thinks of an address,
          eg. UNICAST, LOCAL, BROADCAST, ...
index fea9ef566427f9295adac5c05ef020463af046cd..e6163017c42db2a1d553bc7b8ac812e5c401fcba 100644 (file)
@@ -120,12 +120,8 @@ EXPORT_SYMBOL(nf_unregister_hooks);
 
 unsigned int nf_iterate(struct list_head *head,
                        struct sk_buff *skb,
-                       unsigned int hook,
-                       const struct net_device *indev,
-                       const struct net_device *outdev,
-                       struct nf_hook_ops **elemp,
-                       int (*okfn)(struct sk_buff *),
-                       int hook_thresh)
+                       struct nf_hook_state *state,
+                       struct nf_hook_ops **elemp)
 {
        unsigned int verdict;
 
@@ -134,19 +130,19 @@ unsigned int nf_iterate(struct list_head *head,
         * function because of risk of continuing from deleted element.
         */
        list_for_each_entry_continue_rcu((*elemp), head, list) {
-               if (hook_thresh > (*elemp)->priority)
+               if (state->thresh > (*elemp)->priority)
                        continue;
 
                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
 repeat:
-               verdict = (*elemp)->hook(*elemp, skb, indev, outdev, okfn);
+               verdict = (*elemp)->hook(*elemp, skb, state);
                if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely((verdict & NF_VERDICT_MASK)
                                                        > NF_MAX_VERDICT)) {
                                NFDEBUG("Evil return from %p(%u).\n",
-                                       (*elemp)->hook, hook);
+                                       (*elemp)->hook, state->hook);
                                continue;
                        }
 #endif
@@ -161,11 +157,7 @@ repeat:
 
 /* Returns 1 if okfn() needs to be executed by the caller,
  * -EPERM for NF_DROP, 0 otherwise. */
-int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
-                struct net_device *indev,
-                struct net_device *outdev,
-                int (*okfn)(struct sk_buff *),
-                int hook_thresh)
+int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
 {
        struct nf_hook_ops *elem;
        unsigned int verdict;
@@ -174,10 +166,11 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();
 
-       elem = list_entry_rcu(&nf_hooks[pf][hook], struct nf_hook_ops, list);
+       elem = list_entry_rcu(&nf_hooks[state->pf][state->hook],
+                             struct nf_hook_ops, list);
 next_hook:
-       verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev,
-                            outdev, &elem, okfn, hook_thresh);
+       verdict = nf_iterate(&nf_hooks[state->pf][state->hook], skb, state,
+                            &elem);
        if (verdict == NF_ACCEPT || verdict == NF_STOP) {
                ret = 1;
        } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
@@ -186,8 +179,8 @@ next_hook:
                if (ret == 0)
                        ret = -EPERM;
        } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
-               int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
-                                               verdict >> NF_VERDICT_QBITS);
+               int err = nf_queue(skb, elem, state,
+                                  verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
                        if (err == -ECANCELED)
                                goto next_hook;
index b87ca32efa0b4e6edc7f251c2c32c4ba3b55659c..5d2b806a862e6834ff6c61aee5c0e0a899bbe4b8 100644 (file)
@@ -119,24 +119,24 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
                struct ip_vs_service *svc;
 
                s = this_cpu_ptr(dest->stats.cpustats);
-               s->ustats.inpkts++;
                u64_stats_update_begin(&s->syncp);
-               s->ustats.inbytes += skb->len;
+               s->cnt.inpkts++;
+               s->cnt.inbytes += skb->len;
                u64_stats_update_end(&s->syncp);
 
                rcu_read_lock();
                svc = rcu_dereference(dest->svc);
                s = this_cpu_ptr(svc->stats.cpustats);
-               s->ustats.inpkts++;
                u64_stats_update_begin(&s->syncp);
-               s->ustats.inbytes += skb->len;
+               s->cnt.inpkts++;
+               s->cnt.inbytes += skb->len;
                u64_stats_update_end(&s->syncp);
                rcu_read_unlock();
 
                s = this_cpu_ptr(ipvs->tot_stats.cpustats);
-               s->ustats.inpkts++;
                u64_stats_update_begin(&s->syncp);
-               s->ustats.inbytes += skb->len;
+               s->cnt.inpkts++;
+               s->cnt.inbytes += skb->len;
                u64_stats_update_end(&s->syncp);
        }
 }
@@ -153,24 +153,24 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
                struct ip_vs_service *svc;
 
                s = this_cpu_ptr(dest->stats.cpustats);
-               s->ustats.outpkts++;
                u64_stats_update_begin(&s->syncp);
-               s->ustats.outbytes += skb->len;
+               s->cnt.outpkts++;
+               s->cnt.outbytes += skb->len;
                u64_stats_update_end(&s->syncp);
 
                rcu_read_lock();
                svc = rcu_dereference(dest->svc);
                s = this_cpu_ptr(svc->stats.cpustats);
-               s->ustats.outpkts++;
                u64_stats_update_begin(&s->syncp);
-               s->ustats.outbytes += skb->len;
+               s->cnt.outpkts++;
+               s->cnt.outbytes += skb->len;
                u64_stats_update_end(&s->syncp);
                rcu_read_unlock();
 
                s = this_cpu_ptr(ipvs->tot_stats.cpustats);
-               s->ustats.outpkts++;
                u64_stats_update_begin(&s->syncp);
-               s->ustats.outbytes += skb->len;
+               s->cnt.outpkts++;
+               s->cnt.outbytes += skb->len;
                u64_stats_update_end(&s->syncp);
        }
 }
@@ -183,13 +183,19 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
        struct ip_vs_cpu_stats *s;
 
        s = this_cpu_ptr(cp->dest->stats.cpustats);
-       s->ustats.conns++;
+       u64_stats_update_begin(&s->syncp);
+       s->cnt.conns++;
+       u64_stats_update_end(&s->syncp);
 
        s = this_cpu_ptr(svc->stats.cpustats);
-       s->ustats.conns++;
+       u64_stats_update_begin(&s->syncp);
+       s->cnt.conns++;
+       u64_stats_update_end(&s->syncp);
 
        s = this_cpu_ptr(ipvs->tot_stats.cpustats);
-       s->ustats.conns++;
+       u64_stats_update_begin(&s->syncp);
+       s->cnt.conns++;
+       u64_stats_update_end(&s->syncp);
 }
 
 
@@ -1046,6 +1052,26 @@ static inline bool is_new_conn(const struct sk_buff *skb,
        }
 }
 
+static inline bool is_new_conn_expected(const struct ip_vs_conn *cp,
+                                       int conn_reuse_mode)
+{
+       /* Controlled (FTP DATA or persistence)? */
+       if (cp->control)
+               return false;
+
+       switch (cp->protocol) {
+       case IPPROTO_TCP:
+               return (cp->state == IP_VS_TCP_S_TIME_WAIT) ||
+                       ((conn_reuse_mode & 2) &&
+                        (cp->state == IP_VS_TCP_S_FIN_WAIT) &&
+                        (cp->flags & IP_VS_CONN_F_NOOUTPUT));
+       case IPPROTO_SCTP:
+               return cp->state == IP_VS_SCTP_S_CLOSED;
+       default:
+               return false;
+       }
+}
+
 /* Handle response packets: rewrite addresses and send away...
  */
 static unsigned int
@@ -1246,8 +1272,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
  */
 static unsigned int
 ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
-            const struct net_device *in, const struct net_device *out,
-            int (*okfn)(struct sk_buff *))
+            const struct nf_hook_state *state)
 {
        return ip_vs_out(ops->hooknum, skb, AF_INET);
 }
@@ -1258,8 +1283,7 @@ ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                  const struct net_device *in, const struct net_device *out,
-                  int (*okfn)(struct sk_buff *))
+                  const struct nf_hook_state *state)
 {
        return ip_vs_out(ops->hooknum, skb, AF_INET);
 }
@@ -1273,8 +1297,7 @@ ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
-            const struct net_device *in, const struct net_device *out,
-            int (*okfn)(struct sk_buff *))
+            const struct nf_hook_state *state)
 {
        return ip_vs_out(ops->hooknum, skb, AF_INET6);
 }
@@ -1285,8 +1308,7 @@ ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                  const struct net_device *in, const struct net_device *out,
-                  int (*okfn)(struct sk_buff *))
+                  const struct nf_hook_state *state)
 {
        return ip_vs_out(ops->hooknum, skb, AF_INET6);
 }
@@ -1585,6 +1607,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
        struct ip_vs_conn *cp;
        int ret, pkts;
        struct netns_ipvs *ipvs;
+       int conn_reuse_mode;
 
        /* Already marked as IPVS request or reply? */
        if (skb->ipvs_property)
@@ -1653,10 +1676,14 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
         */
        cp = pp->conn_in_get(af, skb, &iph, 0);
 
-       if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp && cp->dest &&
-           unlikely(!atomic_read(&cp->dest->weight)) && !iph.fragoffs &&
-           is_new_conn(skb, &iph)) {
-               ip_vs_conn_expire_now(cp);
+       conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
+       if (conn_reuse_mode && !iph.fragoffs &&
+           is_new_conn(skb, &iph) && cp &&
+           ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
+             unlikely(!atomic_read(&cp->dest->weight))) ||
+            unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
+               if (!atomic_read(&cp->n_control))
+                       ip_vs_conn_expire_now(cp);
                __ip_vs_conn_put(cp);
                cp = NULL;
        }
@@ -1738,9 +1765,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
  */
 static unsigned int
 ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                     const struct net_device *in,
-                     const struct net_device *out,
-                     int (*okfn)(struct sk_buff *))
+                     const struct nf_hook_state *state)
 {
        return ip_vs_in(ops->hooknum, skb, AF_INET);
 }
@@ -1751,8 +1776,7 @@ ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
        return ip_vs_in(ops->hooknum, skb, AF_INET);
 }
@@ -1765,9 +1789,7 @@ ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                     const struct net_device *in,
-                     const struct net_device *out,
-                     int (*okfn)(struct sk_buff *))
+                     const struct nf_hook_state *state)
 {
        return ip_vs_in(ops->hooknum, skb, AF_INET6);
 }
@@ -1778,8 +1800,7 @@ ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                    const struct net_device *in, const struct net_device *out,
-                    int (*okfn)(struct sk_buff *))
+                    const struct nf_hook_state *state)
 {
        return ip_vs_in(ops->hooknum, skb, AF_INET6);
 }
@@ -1798,8 +1819,7 @@ ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
  */
 static unsigned int
 ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                  const struct net_device *in, const struct net_device *out,
-                  int (*okfn)(struct sk_buff *))
+                  const struct nf_hook_state *state)
 {
        int r;
        struct net *net;
@@ -1820,8 +1840,7 @@ ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
 #ifdef CONFIG_IP_VS_IPV6
 static unsigned int
 ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
-                     const struct net_device *in, const struct net_device *out,
-                     int (*okfn)(struct sk_buff *))
+                     const struct nf_hook_state *state)
 {
        int r;
        struct net *net;
index ed99448671c3003374fc947bee6e91ab0f0d3fce..49532672f66dad0c3bae1b923993b0d1f518b25b 100644 (file)
@@ -729,9 +729,9 @@ static void ip_vs_trash_cleanup(struct net *net)
 }
 
 static void
-ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
+ip_vs_copy_stats(struct ip_vs_kstats *dst, struct ip_vs_stats *src)
 {
-#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->ustats.c - src->ustats0.c
+#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->kstats.c - src->kstats0.c
 
        spin_lock_bh(&src->lock);
 
@@ -746,6 +746,21 @@ ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
        spin_unlock_bh(&src->lock);
 }
 
+static void
+ip_vs_export_stats_user(struct ip_vs_stats_user *dst, struct ip_vs_kstats *src)
+{
+       dst->conns = (u32)src->conns;
+       dst->inpkts = (u32)src->inpkts;
+       dst->outpkts = (u32)src->outpkts;
+       dst->inbytes = src->inbytes;
+       dst->outbytes = src->outbytes;
+       dst->cps = (u32)src->cps;
+       dst->inpps = (u32)src->inpps;
+       dst->outpps = (u32)src->outpps;
+       dst->inbps = (u32)src->inbps;
+       dst->outbps = (u32)src->outbps;
+}
+
 static void
 ip_vs_zero_stats(struct ip_vs_stats *stats)
 {
@@ -753,7 +768,7 @@ ip_vs_zero_stats(struct ip_vs_stats *stats)
 
        /* get current counters as zero point, rates are zeroed */
 
-#define IP_VS_ZERO_STATS_COUNTER(c) stats->ustats0.c = stats->ustats.c
+#define IP_VS_ZERO_STATS_COUNTER(c) stats->kstats0.c = stats->kstats.c
 
        IP_VS_ZERO_STATS_COUNTER(conns);
        IP_VS_ZERO_STATS_COUNTER(inpkts);
@@ -1808,6 +1823,12 @@ static struct ctl_table vs_vars[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "conn_reuse_mode",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
 #ifdef CONFIG_IP_VS_DEBUG
        {
                .procname       = "debug_level",
@@ -2044,7 +2065,7 @@ static const struct file_operations ip_vs_info_fops = {
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
 {
        struct net *net = seq_file_single_net(seq);
-       struct ip_vs_stats_user show;
+       struct ip_vs_kstats show;
 
 /*               01234567 01234567 01234567 0123456701234567 0123456701234567 */
        seq_puts(seq,
@@ -2053,17 +2074,22 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
                   "   Conns  Packets  Packets            Bytes            Bytes\n");
 
        ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats);
-       seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", show.conns,
-                  show.inpkts, show.outpkts,
-                  (unsigned long long) show.inbytes,
-                  (unsigned long long) show.outbytes);
-
-/*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+       seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n\n",
+                  (unsigned long long)show.conns,
+                  (unsigned long long)show.inpkts,
+                  (unsigned long long)show.outpkts,
+                  (unsigned long long)show.inbytes,
+                  (unsigned long long)show.outbytes);
+
+/*                01234567 01234567 01234567 0123456701234567 0123456701234567*/
        seq_puts(seq,
-                  " Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
-       seq_printf(seq, "%8X %8X %8X %16X %16X\n",
-                       show.cps, show.inpps, show.outpps,
-                       show.inbps, show.outbps);
+                " Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
+       seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n",
+                  (unsigned long long)show.cps,
+                  (unsigned long long)show.inpps,
+                  (unsigned long long)show.outpps,
+                  (unsigned long long)show.inbps,
+                  (unsigned long long)show.outbps);
 
        return 0;
 }
@@ -2086,7 +2112,7 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
        struct net *net = seq_file_single_net(seq);
        struct ip_vs_stats *tot_stats = &net_ipvs(net)->tot_stats;
        struct ip_vs_cpu_stats __percpu *cpustats = tot_stats->cpustats;
-       struct ip_vs_stats_user rates;
+       struct ip_vs_kstats kstats;
        int i;
 
 /*               01234567 01234567 01234567 0123456701234567 0123456701234567 */
@@ -2098,41 +2124,41 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
        for_each_possible_cpu(i) {
                struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i);
                unsigned int start;
-               __u64 inbytes, outbytes;
+               u64 conns, inpkts, outpkts, inbytes, outbytes;
 
                do {
                        start = u64_stats_fetch_begin_irq(&u->syncp);
-                       inbytes = u->ustats.inbytes;
-                       outbytes = u->ustats.outbytes;
+                       conns = u->cnt.conns;
+                       inpkts = u->cnt.inpkts;
+                       outpkts = u->cnt.outpkts;
+                       inbytes = u->cnt.inbytes;
+                       outbytes = u->cnt.outbytes;
                } while (u64_stats_fetch_retry_irq(&u->syncp, start));
 
-               seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
-                          i, u->ustats.conns, u->ustats.inpkts,
-                          u->ustats.outpkts, (__u64)inbytes,
-                          (__u64)outbytes);
+               seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n",
+                          i, (u64)conns, (u64)inpkts,
+                          (u64)outpkts, (u64)inbytes,
+                          (u64)outbytes);
        }
 
-       spin_lock_bh(&tot_stats->lock);
-
-       seq_printf(seq, "  ~ %8X %8X %8X %16LX %16LX\n\n",
-                  tot_stats->ustats.conns, tot_stats->ustats.inpkts,
-                  tot_stats->ustats.outpkts,
-                  (unsigned long long) tot_stats->ustats.inbytes,
-                  (unsigned long long) tot_stats->ustats.outbytes);
-
-       ip_vs_read_estimator(&rates, tot_stats);
+       ip_vs_copy_stats(&kstats, tot_stats);
 
-       spin_unlock_bh(&tot_stats->lock);
+       seq_printf(seq, "  ~ %8LX %8LX %8LX %16LX %16LX\n\n",
+                  (unsigned long long)kstats.conns,
+                  (unsigned long long)kstats.inpkts,
+                  (unsigned long long)kstats.outpkts,
+                  (unsigned long long)kstats.inbytes,
+                  (unsigned long long)kstats.outbytes);
 
-/*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+/*                ... 01234567 01234567 01234567 0123456701234567 0123456701234567 */
        seq_puts(seq,
-                  "     Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
-       seq_printf(seq, "    %8X %8X %8X %16X %16X\n",
-                       rates.cps,
-                       rates.inpps,
-                       rates.outpps,
-                       rates.inbps,
-                       rates.outbps);
+                "     Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
+       seq_printf(seq, "    %8LX %8LX %8LX %16LX %16LX\n",
+                  kstats.cps,
+                  kstats.inpps,
+                  kstats.outpps,
+                  kstats.inbps,
+                  kstats.outbps);
 
        return 0;
 }
@@ -2400,6 +2426,7 @@ static void
 ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
 {
        struct ip_vs_scheduler *sched;
+       struct ip_vs_kstats kstats;
 
        sched = rcu_dereference_protected(src->scheduler, 1);
        dst->protocol = src->protocol;
@@ -2411,7 +2438,8 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
        dst->timeout = src->timeout / HZ;
        dst->netmask = src->netmask;
        dst->num_dests = src->num_dests;
-       ip_vs_copy_stats(&dst->stats, &src->stats);
+       ip_vs_copy_stats(&kstats, &src->stats);
+       ip_vs_export_stats_user(&dst->stats, &kstats);
 }
 
 static inline int
@@ -2485,6 +2513,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
                int count = 0;
                struct ip_vs_dest *dest;
                struct ip_vs_dest_entry entry;
+               struct ip_vs_kstats kstats;
 
                memset(&entry, 0, sizeof(entry));
                list_for_each_entry(dest, &svc->destinations, n_list) {
@@ -2506,7 +2535,8 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
                        entry.activeconns = atomic_read(&dest->activeconns);
                        entry.inactconns = atomic_read(&dest->inactconns);
                        entry.persistconns = atomic_read(&dest->persistconns);
-                       ip_vs_copy_stats(&entry.stats, &dest->stats);
+                       ip_vs_copy_stats(&kstats, &dest->stats);
+                       ip_vs_export_stats_user(&entry.stats, &kstats);
                        if (copy_to_user(&uptr->entrytable[count],
                                         &entry, sizeof(entry))) {
                                ret = -EFAULT;
@@ -2798,25 +2828,51 @@ static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
 };
 
 static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
-                                struct ip_vs_stats *stats)
+                                struct ip_vs_kstats *kstats)
 {
-       struct ip_vs_stats_user ustats;
        struct nlattr *nl_stats = nla_nest_start(skb, container_type);
+
        if (!nl_stats)
                return -EMSGSIZE;
 
-       ip_vs_copy_stats(&ustats, stats);
-
-       if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) ||
-           nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) ||
-           nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) ||
-           nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) ||
-           nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) ||
-           nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) ||
-           nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) ||
-           nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) ||
-           nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps))
+       if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, (u32)kstats->inbps) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, (u32)kstats->outbps))
+               goto nla_put_failure;
+       nla_nest_end(skb, nl_stats);
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nl_stats);
+       return -EMSGSIZE;
+}
+
+static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type,
+                                  struct ip_vs_kstats *kstats)
+{
+       struct nlattr *nl_stats = nla_nest_start(skb, container_type);
+
+       if (!nl_stats)
+               return -EMSGSIZE;
+
+       if (nla_put_u64(skb, IPVS_STATS_ATTR_CONNS, kstats->conns) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_CPS, kstats->cps) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps))
                goto nla_put_failure;
        nla_nest_end(skb, nl_stats);
 
@@ -2835,6 +2891,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
        struct nlattr *nl_service;
        struct ip_vs_flags flags = { .flags = svc->flags,
                                     .mask = ~0 };
+       struct ip_vs_kstats kstats;
 
        nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
        if (!nl_service)
@@ -2860,7 +2917,10 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
            nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
            nla_put_be32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
                goto nla_put_failure;
-       if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
+       ip_vs_copy_stats(&kstats, &svc->stats);
+       if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &kstats))
+               goto nla_put_failure;
+       if (ip_vs_genl_fill_stats64(skb, IPVS_SVC_ATTR_STATS64, &kstats))
                goto nla_put_failure;
 
        nla_nest_end(skb, nl_service);
@@ -3032,6 +3092,7 @@ static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
 static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
 {
        struct nlattr *nl_dest;
+       struct ip_vs_kstats kstats;
 
        nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST);
        if (!nl_dest)
@@ -3054,7 +3115,10 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
                        atomic_read(&dest->persistconns)) ||
            nla_put_u16(skb, IPVS_DEST_ATTR_ADDR_FAMILY, dest->af))
                goto nla_put_failure;
-       if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
+       ip_vs_copy_stats(&kstats, &dest->stats);
+       if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &kstats))
+               goto nla_put_failure;
+       if (ip_vs_genl_fill_stats64(skb, IPVS_DEST_ATTR_STATS64, &kstats))
                goto nla_put_failure;
 
        nla_nest_end(skb, nl_dest);
@@ -3732,6 +3796,8 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
        ipvs->sysctl_pmtu_disc = 1;
        tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
        tbl[idx++].data = &ipvs->sysctl_backup_only;
+       ipvs->sysctl_conn_reuse_mode = 1;
+       tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
 
 
        ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
index 1425e9a924c4f64429637bc49cbde204b0bb1921..ef0eb0a8d552944c6149559848e73c9fdd821700 100644 (file)
 
   NOTES.
 
-  * The stored value for average bps is scaled by 2^5, so that maximal
-    rate is ~2.15Gbits/s, average pps and cps are scaled by 2^10.
+  * Average bps is scaled by 2^5, while average pps and cps are scaled by 2^10.
 
-  * A lot code is taken from net/sched/estimator.c
+  * Netlink users can see 64-bit values but sockopt users are restricted
+    to 32-bit values for conns, packets, bps, cps and pps.
+
+  * A lot of code is taken from net/core/gen_estimator.c
  */
 
 
 /*
  * Make a summary from each cpu
  */
-static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
+static void ip_vs_read_cpu_stats(struct ip_vs_kstats *sum,
                                 struct ip_vs_cpu_stats __percpu *stats)
 {
        int i;
@@ -64,27 +66,31 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
        for_each_possible_cpu(i) {
                struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
                unsigned int start;
-               __u64 inbytes, outbytes;
+               u64 conns, inpkts, outpkts, inbytes, outbytes;
+
                if (add) {
-                       sum->conns += s->ustats.conns;
-                       sum->inpkts += s->ustats.inpkts;
-                       sum->outpkts += s->ustats.outpkts;
                        do {
                                start = u64_stats_fetch_begin(&s->syncp);
-                               inbytes = s->ustats.inbytes;
-                               outbytes = s->ustats.outbytes;
+                               conns = s->cnt.conns;
+                               inpkts = s->cnt.inpkts;
+                               outpkts = s->cnt.outpkts;
+                               inbytes = s->cnt.inbytes;
+                               outbytes = s->cnt.outbytes;
                        } while (u64_stats_fetch_retry(&s->syncp, start));
+                       sum->conns += conns;
+                       sum->inpkts += inpkts;
+                       sum->outpkts += outpkts;
                        sum->inbytes += inbytes;
                        sum->outbytes += outbytes;
                } else {
                        add = true;
-                       sum->conns = s->ustats.conns;
-                       sum->inpkts = s->ustats.inpkts;
-                       sum->outpkts = s->ustats.outpkts;
                        do {
                                start = u64_stats_fetch_begin(&s->syncp);
-                               sum->inbytes = s->ustats.inbytes;
-                               sum->outbytes = s->ustats.outbytes;
+                               sum->conns = s->cnt.conns;
+                               sum->inpkts = s->cnt.inpkts;
+                               sum->outpkts = s->cnt.outpkts;
+                               sum->inbytes = s->cnt.inbytes;
+                               sum->outbytes = s->cnt.outbytes;
                        } while (u64_stats_fetch_retry(&s->syncp, start));
                }
        }
@@ -95,10 +101,7 @@ static void estimation_timer(unsigned long arg)
 {
        struct ip_vs_estimator *e;
        struct ip_vs_stats *s;
-       u32 n_conns;
-       u32 n_inpkts, n_outpkts;
-       u64 n_inbytes, n_outbytes;
-       u32 rate;
+       u64 rate;
        struct net *net = (struct net *)arg;
        struct netns_ipvs *ipvs;
 
@@ -108,33 +111,29 @@ static void estimation_timer(unsigned long arg)
                s = container_of(e, struct ip_vs_stats, est);
 
                spin_lock(&s->lock);
-               ip_vs_read_cpu_stats(&s->ustats, s->cpustats);
-               n_conns = s->ustats.conns;
-               n_inpkts = s->ustats.inpkts;
-               n_outpkts = s->ustats.outpkts;
-               n_inbytes = s->ustats.inbytes;
-               n_outbytes = s->ustats.outbytes;
+               ip_vs_read_cpu_stats(&s->kstats, s->cpustats);
 
                /* scaled by 2^10, but divided 2 seconds */
-               rate = (n_conns - e->last_conns) << 9;
-               e->last_conns = n_conns;
-               e->cps += ((long)rate - (long)e->cps) >> 2;
-
-               rate = (n_inpkts - e->last_inpkts) << 9;
-               e->last_inpkts = n_inpkts;
-               e->inpps += ((long)rate - (long)e->inpps) >> 2;
-
-               rate = (n_outpkts - e->last_outpkts) << 9;
-               e->last_outpkts = n_outpkts;
-               e->outpps += ((long)rate - (long)e->outpps) >> 2;
-
-               rate = (n_inbytes - e->last_inbytes) << 4;
-               e->last_inbytes = n_inbytes;
-               e->inbps += ((long)rate - (long)e->inbps) >> 2;
-
-               rate = (n_outbytes - e->last_outbytes) << 4;
-               e->last_outbytes = n_outbytes;
-               e->outbps += ((long)rate - (long)e->outbps) >> 2;
+               rate = (s->kstats.conns - e->last_conns) << 9;
+               e->last_conns = s->kstats.conns;
+               e->cps += ((s64)rate - (s64)e->cps) >> 2;
+
+               rate = (s->kstats.inpkts - e->last_inpkts) << 9;
+               e->last_inpkts = s->kstats.inpkts;
+               e->inpps += ((s64)rate - (s64)e->inpps) >> 2;
+
+               rate = (s->kstats.outpkts - e->last_outpkts) << 9;
+               e->last_outpkts = s->kstats.outpkts;
+               e->outpps += ((s64)rate - (s64)e->outpps) >> 2;
+
+               /* scaled by 2^5, but divided 2 seconds */
+               rate = (s->kstats.inbytes - e->last_inbytes) << 4;
+               e->last_inbytes = s->kstats.inbytes;
+               e->inbps += ((s64)rate - (s64)e->inbps) >> 2;
+
+               rate = (s->kstats.outbytes - e->last_outbytes) << 4;
+               e->last_outbytes = s->kstats.outbytes;
+               e->outbps += ((s64)rate - (s64)e->outbps) >> 2;
                spin_unlock(&s->lock);
        }
        spin_unlock(&ipvs->est_lock);
@@ -166,14 +165,14 @@ void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats)
 void ip_vs_zero_estimator(struct ip_vs_stats *stats)
 {
        struct ip_vs_estimator *est = &stats->est;
-       struct ip_vs_stats_user *u = &stats->ustats;
+       struct ip_vs_kstats *k = &stats->kstats;
 
        /* reset counters, caller must hold the stats->lock lock */
-       est->last_inbytes = u->inbytes;
-       est->last_outbytes = u->outbytes;
-       est->last_conns = u->conns;
-       est->last_inpkts = u->inpkts;
-       est->last_outpkts = u->outpkts;
+       est->last_inbytes = k->inbytes;
+       est->last_outbytes = k->outbytes;
+       est->last_conns = k->conns;
+       est->last_inpkts = k->inpkts;
+       est->last_outpkts = k->outpkts;
        est->cps = 0;
        est->inpps = 0;
        est->outpps = 0;
@@ -182,8 +181,7 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
 }
 
 /* Get decoded rates */
-void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
-                         struct ip_vs_stats *stats)
+void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats)
 {
        struct ip_vs_estimator *e = &stats->est;
 
index d93ceeb3ef04822427004ef0a70549f389d17354..19b9cce6c210c425f3e577a22e0ac8c2e1c71804 100644 (file)
@@ -845,10 +845,27 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
        struct ip_vs_conn *cp;
        struct netns_ipvs *ipvs = net_ipvs(net);
 
-       if (!(flags & IP_VS_CONN_F_TEMPLATE))
+       if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
                cp = ip_vs_conn_in_get(param);
-       else
+               if (cp && ((cp->dport != dport) ||
+                          !ip_vs_addr_equal(cp->daf, &cp->daddr, daddr))) {
+                       if (!(flags & IP_VS_CONN_F_INACTIVE)) {
+                               ip_vs_conn_expire_now(cp);
+                               __ip_vs_conn_put(cp);
+                               cp = NULL;
+                       } else {
+                               /* This is the expiration message for the
+                                * connection that was already replaced, so we
+                                * just ignore it.
+                                */
+                               __ip_vs_conn_put(cp);
+                               kfree(param->pe_data);
+                               return;
+                       }
+               }
+       } else {
                cp = ip_vs_ct_in_get(param);
+       }
 
        if (cp) {
                /* Free pe_data */
@@ -1388,9 +1405,11 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
 
        mreq.imr_ifindex = dev->ifindex;
 
+       rtnl_lock();
        lock_sock(sk);
        ret = ip_mc_join_group(sk, &mreq);
        release_sock(sk);
+       rtnl_unlock();
 
        return ret;
 }
index 3aedbda7658a4fbbbd54fcc683a09f089ffe6de5..bf02932b7188d71ea0c0f1ea438c1b33766b7979 100644 (file)
@@ -209,7 +209,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
        struct sock *sk = skb->sk;
        struct rtable *ort = skb_rtable(skb);
 
-       if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT)
+       if (!skb->dev && sk && sk_fullsock(sk))
                ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
 }
 
@@ -924,7 +924,8 @@ int
 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
 {
-       struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+       struct net *net = skb_net(skb);
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct rtable *rt;                      /* Route to the other host */
        __be32 saddr;                           /* Source for tunnel */
        struct net_device *tdev;                /* Device to other host */
@@ -991,7 +992,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        iph->daddr              =       cp->daddr.ip;
        iph->saddr              =       saddr;
        iph->ttl                =       ttl;
-       ip_select_ident(skb, NULL);
+       ip_select_ident(net, skb, NULL);
 
        /* Another hack: avoid icmp_send in ip_fragment */
        skb->ignore_df = 1;
index a4b5e2a435acb4c2fafaf26ccb4fce349a151f9a..45da11afa785b2779857b3786881158c821263b3 100644 (file)
@@ -47,9 +47,11 @@ seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
                return 0;
 
        counter = acct->counter;
-       return seq_printf(s, "packets=%llu bytes=%llu ",
-                         (unsigned long long)atomic64_read(&counter[dir].packets),
-                         (unsigned long long)atomic64_read(&counter[dir].bytes));
+       seq_printf(s, "packets=%llu bytes=%llu ",
+                  (unsigned long long)atomic64_read(&counter[dir].packets),
+                  (unsigned long long)atomic64_read(&counter[dir].bytes));
+
+       return 0;
 };
 EXPORT_SYMBOL_GPL(seq_print_acct);
 
index b8b95f4027caf8b177d4e5550047fc11e05d8f69..57a26cc90c9fada2be251d6718f8b814e059beea 100644 (file)
@@ -88,7 +88,6 @@ static int amanda_help(struct sk_buff *skb,
                       struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo)
 {
-       struct ts_state ts;
        struct nf_conntrack_expect *exp;
        struct nf_conntrack_tuple *tuple;
        unsigned int dataoff, start, stop, off, i;
@@ -113,23 +112,20 @@ static int amanda_help(struct sk_buff *skb,
                return NF_ACCEPT;
        }
 
-       memset(&ts, 0, sizeof(ts));
        start = skb_find_text(skb, dataoff, skb->len,
-                             search[SEARCH_CONNECT].ts, &ts);
+                             search[SEARCH_CONNECT].ts);
        if (start == UINT_MAX)
                goto out;
        start += dataoff + search[SEARCH_CONNECT].len;
 
-       memset(&ts, 0, sizeof(ts));
        stop = skb_find_text(skb, start, skb->len,
-                            search[SEARCH_NEWLINE].ts, &ts);
+                            search[SEARCH_NEWLINE].ts);
        if (stop == UINT_MAX)
                goto out;
        stop += start;
 
        for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) {
-               memset(&ts, 0, sizeof(ts));
-               off = skb_find_text(skb, start, stop, search[i].ts, &ts);
+               off = skb_find_text(skb, start, stop, search[i].ts);
                if (off == UINT_MAX)
                        continue;
                off += start + search[i].len;
index 91a1837acd0e8fb981ccea73ae262197afecfb33..7a17070c5dabb979c2cb90b7b62f35a79cdc92c0 100644 (file)
@@ -561,7 +561,9 @@ static int exp_seq_show(struct seq_file *s, void *v)
                                   helper->expect_policy[expect->class].name);
        }
 
-       return seq_putc(s, '\n');
+       seq_putc(s, '\n');
+
+       return 0;
 }
 
 static const struct seq_operations exp_seq_ops = {
index 61a3c927e63cf1c9f0f9b596155e5c8a74bcd71a..ea7f36784b3dae0d34121107607b0ca8763ff411 100644 (file)
 
 /* core.c */
 unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
-                       unsigned int hook, const struct net_device *indev,
-                       const struct net_device *outdev,
-                       struct nf_hook_ops **elemp,
-                       int (*okfn)(struct sk_buff *), int hook_thresh);
+                       struct nf_hook_state *state, struct nf_hook_ops **elemp);
 
 /* nf_queue.c */
-int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf,
-            unsigned int hook, struct net_device *indev,
-            struct net_device *outdev, int (*okfn)(struct sk_buff *),
-            unsigned int queuenum);
+int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
+            struct nf_hook_state *state, unsigned int queuenum);
 int __init netfilter_queue_init(void);
 
 /* nf_log.c */
index a2233e77cf3990d8bbd2bde72747d2005fe2c1d4..2631876ac55be96aeec77ab1d15f5db8c5a80c49 100644 (file)
@@ -133,7 +133,7 @@ EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header);
 
 void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk)
 {
-       if (!sk || sk->sk_state == TCP_TIME_WAIT)
+       if (!sk || !sk_fullsock(sk))
                return;
 
        read_lock_bh(&sk->sk_callback_lock);
index 4c8b68e5fa164fd71b6f613b66b36d52e6717bf4..d3cd37edca182707fb587c89fd54610ca5c93a8a 100644 (file)
@@ -47,11 +47,13 @@ EXPORT_SYMBOL(nf_unregister_queue_handler);
 
 void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 {
+       struct nf_hook_state *state = &entry->state;
+
        /* Release those devices we held, or Alexey will kill me. */
-       if (entry->indev)
-               dev_put(entry->indev);
-       if (entry->outdev)
-               dev_put(entry->outdev);
+       if (state->in)
+               dev_put(state->in);
+       if (state->out)
+               dev_put(state->out);
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
@@ -70,13 +72,15 @@ EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
 /* Bump dev refs so they don't vanish while packet is out */
 bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
 {
+       struct nf_hook_state *state = &entry->state;
+
        if (!try_module_get(entry->elem->owner))
                return false;
 
-       if (entry->indev)
-               dev_hold(entry->indev);
-       if (entry->outdev)
-               dev_hold(entry->outdev);
+       if (state->in)
+               dev_hold(state->in);
+       if (state->out)
+               dev_hold(state->out);
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
@@ -100,12 +104,9 @@ EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
  * through nf_reinject().
  */
 int nf_queue(struct sk_buff *skb,
-                     struct nf_hook_ops *elem,
-                     u_int8_t pf, unsigned int hook,
-                     struct net_device *indev,
-                     struct net_device *outdev,
-                     int (*okfn)(struct sk_buff *),
-                     unsigned int queuenum)
+            struct nf_hook_ops *elem,
+            struct nf_hook_state *state,
+            unsigned int queuenum)
 {
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
@@ -121,7 +122,7 @@ int nf_queue(struct sk_buff *skb,
                goto err_unlock;
        }
 
-       afinfo = nf_get_afinfo(pf);
+       afinfo = nf_get_afinfo(state->pf);
        if (!afinfo)
                goto err_unlock;
 
@@ -134,11 +135,7 @@ int nf_queue(struct sk_buff *skb,
        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .elem   = elem,
-               .pf     = pf,
-               .hook   = hook,
-               .indev  = indev,
-               .outdev = outdev,
-               .okfn   = okfn,
+               .state  = *state,
                .size   = sizeof(*entry) + afinfo->route_key_size,
        };
 
@@ -184,30 +181,29 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        }
 
        if (verdict == NF_ACCEPT) {
-               afinfo = nf_get_afinfo(entry->pf);
+               afinfo = nf_get_afinfo(entry->state.pf);
                if (!afinfo || afinfo->reroute(skb, entry) < 0)
                        verdict = NF_DROP;
        }
 
+       entry->state.thresh = INT_MIN;
+
        if (verdict == NF_ACCEPT) {
        next_hook:
-               verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
-                                    skb, entry->hook,
-                                    entry->indev, entry->outdev, &elem,
-                                    entry->okfn, INT_MIN);
+               verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook],
+                                    skb, &entry->state, &elem);
        }
 
        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
-               entry->okfn(skb);
+               entry->state.okfn(skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
-               err = nf_queue(skb, elem, entry->pf, entry->hook,
-                               entry->indev, entry->outdev, entry->okfn,
-                               verdict >> NF_VERDICT_QBITS);
+               err = nf_queue(skb, elem, &entry->state,
+                              verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
                        if (err == -ECANCELED)
                                goto next_hook;
index ac1a9528dbf2e4af0d33fec5667369d23ed179e4..5604c2df05d1a40f2e812d18d2a72fd895359100 100644 (file)
@@ -198,36 +198,31 @@ static int nft_delchain(struct nft_ctx *ctx)
 static inline bool
 nft_rule_is_active(struct net *net, const struct nft_rule *rule)
 {
-       return (rule->genmask & (1 << net->nft.gencursor)) == 0;
-}
-
-static inline int gencursor_next(struct net *net)
-{
-       return net->nft.gencursor+1 == 1 ? 1 : 0;
+       return (rule->genmask & nft_genmask_cur(net)) == 0;
 }
 
 static inline int
 nft_rule_is_active_next(struct net *net, const struct nft_rule *rule)
 {
-       return (rule->genmask & (1 << gencursor_next(net))) == 0;
+       return (rule->genmask & nft_genmask_next(net)) == 0;
 }
 
 static inline void
 nft_rule_activate_next(struct net *net, struct nft_rule *rule)
 {
        /* Now inactive, will be active in the future */
-       rule->genmask = (1 << net->nft.gencursor);
+       rule->genmask = nft_genmask_cur(net);
 }
 
 static inline void
 nft_rule_deactivate_next(struct net *net, struct nft_rule *rule)
 {
-       rule->genmask = (1 << gencursor_next(net));
+       rule->genmask = nft_genmask_next(net);
 }
 
 static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
 {
-       rule->genmask &= ~(1 << gencursor_next(net));
+       rule->genmask &= ~nft_genmask_next(net);
 }
 
 static int
@@ -401,7 +396,8 @@ nf_tables_chain_type_lookup(const struct nft_af_info *afi,
 }
 
 static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
-       [NFTA_TABLE_NAME]       = { .type = NLA_STRING },
+       [NFTA_TABLE_NAME]       = { .type = NLA_STRING,
+                                   .len = NFT_TABLE_MAXNAMELEN - 1 },
        [NFTA_TABLE_FLAGS]      = { .type = NLA_U32 },
 };
 
@@ -686,26 +682,28 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
        if (!try_module_get(afi->owner))
                return -EAFNOSUPPORT;
 
-       table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL);
-       if (table == NULL) {
-               module_put(afi->owner);
-               return -ENOMEM;
-       }
+       err = -ENOMEM;
+       table = kzalloc(sizeof(*table), GFP_KERNEL);
+       if (table == NULL)
+               goto err1;
 
-       nla_strlcpy(table->name, name, nla_len(name));
+       nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN);
        INIT_LIST_HEAD(&table->chains);
        INIT_LIST_HEAD(&table->sets);
        table->flags = flags;
 
        nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
        err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
-       if (err < 0) {
-               kfree(table);
-               module_put(afi->owner);
-               return err;
-       }
+       if (err < 0)
+               goto err2;
+
        list_add_tail_rcu(&table->list, &afi->tables);
        return 0;
+err2:
+       kfree(table);
+err1:
+       module_put(afi->owner);
+       return err;
 }
 
 static int nft_flush_table(struct nft_ctx *ctx)
@@ -1351,6 +1349,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        rcu_assign_pointer(basechain->stats, stats);
                }
 
+               write_pnet(&basechain->pnet, net);
                basechain->type = type;
                chain = &basechain->chain;
 
@@ -1378,7 +1377,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 
        INIT_LIST_HEAD(&chain->rules);
        chain->handle = nf_tables_alloc_handle(table);
-       chain->net = net;
        chain->table = table;
        nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
 
@@ -2692,6 +2690,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
                goto err2;
 
        INIT_LIST_HEAD(&set->bindings);
+       write_pnet(&set->pnet, net);
        set->ops   = ops;
        set->ktype = ktype;
        set->klen  = desc.klen;
@@ -2768,10 +2767,11 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
                                        const struct nft_set_iter *iter,
                                        const struct nft_set_elem *elem)
 {
+       const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
        enum nft_registers dreg;
 
        dreg = nft_type_to_reg(set->dtype);
-       return nft_validate_data_load(ctx, dreg, &elem->data,
+       return nft_validate_data_load(ctx, dreg, nft_set_ext_data(ext),
                                      set->dtype == NFT_DATA_VERDICT ?
                                      NFT_DATA_VERDICT : NFT_DATA_VALUE);
 }
@@ -2824,6 +2824,22 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
                nf_tables_set_destroy(ctx, set);
 }
 
+const struct nft_set_ext_type nft_set_ext_types[] = {
+       [NFT_SET_EXT_KEY]               = {
+               .len    = sizeof(struct nft_data),
+               .align  = __alignof__(struct nft_data),
+       },
+       [NFT_SET_EXT_DATA]              = {
+               .len    = sizeof(struct nft_data),
+               .align  = __alignof__(struct nft_data),
+       },
+       [NFT_SET_EXT_FLAGS]             = {
+               .len    = sizeof(u8),
+               .align  = __alignof__(u8),
+       },
+};
+EXPORT_SYMBOL_GPL(nft_set_ext_types);
+
 /*
  * Set elements
  */
@@ -2870,6 +2886,7 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
                                  const struct nft_set *set,
                                  const struct nft_set_elem *elem)
 {
+       const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
 
@@ -2877,20 +2894,20 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
        if (nest == NULL)
                goto nla_put_failure;
 
-       if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, &elem->key, NFT_DATA_VALUE,
-                         set->klen) < 0)
+       if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, nft_set_ext_key(ext),
+                         NFT_DATA_VALUE, set->klen) < 0)
                goto nla_put_failure;
 
-       if (set->flags & NFT_SET_MAP &&
-           !(elem->flags & NFT_SET_ELEM_INTERVAL_END) &&
-           nft_data_dump(skb, NFTA_SET_ELEM_DATA, &elem->data,
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
+           nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
                          set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
                          set->dlen) < 0)
                goto nla_put_failure;
 
-       if (elem->flags != 0)
-               if (nla_put_be32(skb, NFTA_SET_ELEM_FLAGS, htonl(elem->flags)))
-                       goto nla_put_failure;
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+           nla_put_be32(skb, NFTA_SET_ELEM_FLAGS,
+                        htonl(*nft_set_ext_flags(ext))))
+               goto nla_put_failure;
 
        nla_nest_end(skb, nest);
        return 0;
@@ -3111,15 +3128,54 @@ static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
        return trans;
 }
 
+static void *nft_set_elem_init(const struct nft_set *set,
+                              const struct nft_set_ext_tmpl *tmpl,
+                              const struct nft_data *key,
+                              const struct nft_data *data,
+                              gfp_t gfp)
+{
+       struct nft_set_ext *ext;
+       void *elem;
+
+       elem = kzalloc(set->ops->elemsize + tmpl->len, gfp);
+       if (elem == NULL)
+               return NULL;
+
+       ext = nft_set_elem_ext(set, elem);
+       nft_set_ext_init(ext, tmpl);
+
+       memcpy(nft_set_ext_key(ext), key, set->klen);
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+               memcpy(nft_set_ext_data(ext), data, set->dlen);
+
+       return elem;
+}
+
+void nft_set_elem_destroy(const struct nft_set *set, void *elem)
+{
+       struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
+
+       nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE);
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+               nft_data_uninit(nft_set_ext_data(ext), set->dtype);
+
+       kfree(elem);
+}
+EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
+
 static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                            const struct nlattr *attr)
 {
        struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
        struct nft_data_desc d1, d2;
+       struct nft_set_ext_tmpl tmpl;
+       struct nft_set_ext *ext;
        struct nft_set_elem elem;
        struct nft_set_binding *binding;
+       struct nft_data data;
        enum nft_registers dreg;
        struct nft_trans *trans;
+       u32 flags;
        int err;
 
        if (set->size && set->nelems == set->size)
@@ -3133,19 +3189,26 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        if (nla[NFTA_SET_ELEM_KEY] == NULL)
                return -EINVAL;
 
-       elem.flags = 0;
+       nft_set_ext_prepare(&tmpl);
+
+       flags = 0;
        if (nla[NFTA_SET_ELEM_FLAGS] != NULL) {
-               elem.flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
-               if (elem.flags & ~NFT_SET_ELEM_INTERVAL_END)
+               flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
+               if (flags & ~NFT_SET_ELEM_INTERVAL_END)
+                       return -EINVAL;
+               if (!(set->flags & NFT_SET_INTERVAL) &&
+                   flags & NFT_SET_ELEM_INTERVAL_END)
                        return -EINVAL;
+               if (flags != 0)
+                       nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
        }
 
        if (set->flags & NFT_SET_MAP) {
                if (nla[NFTA_SET_ELEM_DATA] == NULL &&
-                   !(elem.flags & NFT_SET_ELEM_INTERVAL_END))
+                   !(flags & NFT_SET_ELEM_INTERVAL_END))
                        return -EINVAL;
                if (nla[NFTA_SET_ELEM_DATA] != NULL &&
-                   elem.flags & NFT_SET_ELEM_INTERVAL_END)
+                   flags & NFT_SET_ELEM_INTERVAL_END)
                        return -EINVAL;
        } else {
                if (nla[NFTA_SET_ELEM_DATA] != NULL)
@@ -3159,12 +3222,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        if (d1.type != NFT_DATA_VALUE || d1.len != set->klen)
                goto err2;
 
-       err = -EEXIST;
-       if (set->ops->get(set, &elem) == 0)
-               goto err2;
+       nft_set_ext_add(&tmpl, NFT_SET_EXT_KEY);
 
        if (nla[NFTA_SET_ELEM_DATA] != NULL) {
-               err = nft_data_init(ctx, &elem.data, &d2, nla[NFTA_SET_ELEM_DATA]);
+               err = nft_data_init(ctx, &data, &d2, nla[NFTA_SET_ELEM_DATA]);
                if (err < 0)
                        goto err2;
 
@@ -3181,29 +3242,43 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                        };
 
                        err = nft_validate_data_load(&bind_ctx, dreg,
-                                                    &elem.data, d2.type);
+                                                    &data, d2.type);
                        if (err < 0)
                                goto err3;
                }
+
+               nft_set_ext_add(&tmpl, NFT_SET_EXT_DATA);
        }
 
+       err = -ENOMEM;
+       elem.priv = nft_set_elem_init(set, &tmpl, &elem.key, &data, GFP_KERNEL);
+       if (elem.priv == NULL)
+               goto err3;
+
+       ext = nft_set_elem_ext(set, elem.priv);
+       if (flags)
+               *nft_set_ext_flags(ext) = flags;
+
        trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
        if (trans == NULL)
-               goto err3;
+               goto err4;
 
+       ext->genmask = nft_genmask_cur(ctx->net);
        err = set->ops->insert(set, &elem);
        if (err < 0)
-               goto err4;
+               goto err5;
 
        nft_trans_elem(trans) = elem;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
        return 0;
 
-err4:
+err5:
        kfree(trans);
+err4:
+       kfree(elem.priv);
 err3:
        if (nla[NFTA_SET_ELEM_DATA] != NULL)
-               nft_data_uninit(&elem.data, d2.type);
+               nft_data_uninit(&data, d2.type);
 err2:
        nft_data_uninit(&elem.key, d1.type);
 err1:
@@ -3276,19 +3351,24 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
        if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
                goto err2;
 
-       err = set->ops->get(set, &elem);
-       if (err < 0)
-               goto err2;
-
        trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
        if (trans == NULL) {
                err = -ENOMEM;
                goto err2;
        }
 
+       elem.priv = set->ops->deactivate(set, &elem);
+       if (elem.priv == NULL) {
+               err = -ENOENT;
+               goto err3;
+       }
+
        nft_trans_elem(trans) = elem;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
        return 0;
+
+err3:
+       kfree(trans);
 err2:
        nft_data_uninit(&elem.key, desc.type);
 err1:
@@ -3526,6 +3606,10 @@ static void nf_tables_commit_release(struct nft_trans *trans)
        case NFT_MSG_DELSET:
                nft_set_destroy(nft_trans_set(trans));
                break;
+       case NFT_MSG_DELSETELEM:
+               nft_set_elem_destroy(nft_trans_elem_set(trans),
+                                    nft_trans_elem(trans).priv);
+               break;
        }
        kfree(trans);
 }
@@ -3540,7 +3624,7 @@ static int nf_tables_commit(struct sk_buff *skb)
        while (++net->nft.base_seq == 0);
 
        /* A new generation has just started */
-       net->nft.gencursor = gencursor_next(net);
+       net->nft.gencursor = nft_gencursor_next(net);
 
        /* Make sure all packets have left the previous generation before
         * purging old rules.
@@ -3611,24 +3695,21 @@ static int nf_tables_commit(struct sk_buff *skb)
                                             NFT_MSG_DELSET, GFP_KERNEL);
                        break;
                case NFT_MSG_NEWSETELEM:
-                       nf_tables_setelem_notify(&trans->ctx,
-                                                nft_trans_elem_set(trans),
-                                                &nft_trans_elem(trans),
+                       te = (struct nft_trans_elem *)trans->data;
+
+                       te->set->ops->activate(te->set, &te->elem);
+                       nf_tables_setelem_notify(&trans->ctx, te->set,
+                                                &te->elem,
                                                 NFT_MSG_NEWSETELEM, 0);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_DELSETELEM:
                        te = (struct nft_trans_elem *)trans->data;
+
                        nf_tables_setelem_notify(&trans->ctx, te->set,
                                                 &te->elem,
                                                 NFT_MSG_DELSETELEM, 0);
-                       te->set->ops->get(te->set, &te->elem);
-                       nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
-                       if (te->set->flags & NFT_SET_MAP &&
-                           !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
-                               nft_data_uninit(&te->elem.data, te->set->dtype);
                        te->set->ops->remove(te->set, &te->elem);
-                       nft_trans_destroy(trans);
                        break;
                }
        }
@@ -3660,6 +3741,10 @@ static void nf_tables_abort_release(struct nft_trans *trans)
        case NFT_MSG_NEWSET:
                nft_set_destroy(nft_trans_set(trans));
                break;
+       case NFT_MSG_NEWSETELEM:
+               nft_set_elem_destroy(nft_trans_elem_set(trans),
+                                    nft_trans_elem(trans).priv);
+               break;
        }
        kfree(trans);
 }
@@ -3730,16 +3815,15 @@ static int nf_tables_abort(struct sk_buff *skb)
                case NFT_MSG_NEWSETELEM:
                        nft_trans_elem_set(trans)->nelems--;
                        te = (struct nft_trans_elem *)trans->data;
-                       te->set->ops->get(te->set, &te->elem);
-                       nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
-                       if (te->set->flags & NFT_SET_MAP &&
-                           !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
-                               nft_data_uninit(&te->elem.data, te->set->dtype);
+
                        te->set->ops->remove(te->set, &te->elem);
-                       nft_trans_destroy(trans);
                        break;
                case NFT_MSG_DELSETELEM:
+                       te = (struct nft_trans_elem *)trans->data;
+
                        nft_trans_elem_set(trans)->nelems++;
+                       te->set->ops->activate(te->set, &te->elem);
+
                        nft_trans_destroy(trans);
                        break;
                }
@@ -3814,13 +3898,18 @@ static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
                                        const struct nft_set_iter *iter,
                                        const struct nft_set_elem *elem)
 {
-       if (elem->flags & NFT_SET_ELEM_INTERVAL_END)
+       const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+       const struct nft_data *data;
+
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+           *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
                return 0;
 
-       switch (elem->data.verdict) {
+       data = nft_set_ext_data(ext);
+       switch (data->verdict) {
        case NFT_JUMP:
        case NFT_GOTO:
-               return nf_tables_check_loops(ctx, elem->data.chain);
+               return nf_tables_check_loops(ctx, data->chain);
        default:
                return 0;
        }
index 2d298dccb6dd3fc5589be16021c843d741a4c025..ef4dfcbaf149f4c207f0096ceb6b8a6c1aa3d924 100644 (file)
@@ -8,6 +8,7 @@
  * Development of this code funded by Astaro AG (http://www.astaro.com/)
  */
 
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <net/netfilter/nf_tables.h>
 #include <net/netfilter/nf_log.h>
 
+enum nft_trace {
+       NFT_TRACE_RULE,
+       NFT_TRACE_RETURN,
+       NFT_TRACE_POLICY,
+};
+
+static const char *const comments[] = {
+       [NFT_TRACE_RULE]        = "rule",
+       [NFT_TRACE_RETURN]      = "return",
+       [NFT_TRACE_POLICY]      = "policy",
+};
+
+static struct nf_loginfo trace_loginfo = {
+       .type = NF_LOG_TYPE_LOG,
+       .u = {
+               .log = {
+                       .level = LOGLEVEL_WARNING,
+                       .logflags = NF_LOG_MASK,
+               },
+       },
+};
+
+static void __nft_trace_packet(const struct nft_pktinfo *pkt,
+                              const struct nft_chain *chain,
+                              int rulenum, enum nft_trace type)
+{
+       struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+
+       nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
+                    pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
+                    chain->table->name, chain->name, comments[type],
+                    rulenum);
+}
+
+static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
+                                   const struct nft_chain *chain,
+                                   int rulenum, enum nft_trace type)
+{
+       if (unlikely(pkt->skb->nf_trace))
+               __nft_trace_packet(pkt, chain, rulenum, type);
+}
+
 static void nft_cmp_fast_eval(const struct nft_expr *expr,
                              struct nft_data data[NFT_REG_MAX + 1])
 {
@@ -66,44 +109,11 @@ struct nft_jumpstack {
        int                     rulenum;
 };
 
-enum nft_trace {
-       NFT_TRACE_RULE,
-       NFT_TRACE_RETURN,
-       NFT_TRACE_POLICY,
-};
-
-static const char *const comments[] = {
-       [NFT_TRACE_RULE]        = "rule",
-       [NFT_TRACE_RETURN]      = "return",
-       [NFT_TRACE_POLICY]      = "policy",
-};
-
-static struct nf_loginfo trace_loginfo = {
-       .type = NF_LOG_TYPE_LOG,
-       .u = {
-               .log = {
-                       .level = 4,
-                       .logflags = NF_LOG_MASK,
-               },
-       },
-};
-
-static void nft_trace_packet(const struct nft_pktinfo *pkt,
-                            const struct nft_chain *chain,
-                            int rulenum, enum nft_trace type)
-{
-       struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
-
-       nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
-                    pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
-                    chain->table->name, chain->name, comments[type],
-                    rulenum);
-}
-
 unsigned int
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
        const struct nft_chain *chain = ops->priv, *basechain = chain;
+       const struct net *net = read_pnet(&nft_base_chain(basechain)->pnet);
        const struct nft_rule *rule;
        const struct nft_expr *expr, *last;
        struct nft_data data[NFT_REG_MAX + 1];
@@ -111,11 +121,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
        struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
        struct nft_stats *stats;
        int rulenum;
-       /*
-        * Cache cursor to avoid problems in case that the cursor is updated
-        * while traversing the ruleset.
-        */
-       unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
+       unsigned int gencursor = nft_genmask_cur(net);
 
 do_chain:
        rulenum = 0;
@@ -146,8 +152,7 @@ next_rule:
                        data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
                        continue;
                case NFT_CONTINUE:
-                       if (unlikely(pkt->skb->nf_trace))
-                               nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
                        continue;
                }
                break;
@@ -157,37 +162,28 @@ next_rule:
        case NF_ACCEPT:
        case NF_DROP:
        case NF_QUEUE:
-               if (unlikely(pkt->skb->nf_trace))
-                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
-
+               nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
                return data[NFT_REG_VERDICT].verdict;
        }
 
        switch (data[NFT_REG_VERDICT].verdict) {
        case NFT_JUMP:
-               if (unlikely(pkt->skb->nf_trace))
-                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
-
                BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
                jumpstack[stackptr].chain = chain;
                jumpstack[stackptr].rule  = rule;
                jumpstack[stackptr].rulenum = rulenum;
                stackptr++;
-               chain = data[NFT_REG_VERDICT].chain;
-               goto do_chain;
+               /* fall through */
        case NFT_GOTO:
-               if (unlikely(pkt->skb->nf_trace))
-                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+               nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
 
                chain = data[NFT_REG_VERDICT].chain;
                goto do_chain;
-       case NFT_RETURN:
-               if (unlikely(pkt->skb->nf_trace))
-                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
-               break;
        case NFT_CONTINUE:
-               if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
-                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
+               rulenum++;
+               /* fall through */
+       case NFT_RETURN:
+               nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
                break;
        default:
                WARN_ON(1);
@@ -201,8 +197,7 @@ next_rule:
                goto next_rule;
        }
 
-       if (unlikely(pkt->skb->nf_trace))
-               nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
+       nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
 
        rcu_read_lock_bh();
        stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
index 11d85b3813f26ae87f465d7f4d09ae36a6d0167e..957b83a0223b8eef159b572a2b685095a2d3e0ab 100644 (file)
@@ -539,7 +539,7 @@ __build_packet_message(struct nfnl_log_net *log,
 
        /* UID */
        sk = skb->sk;
-       if (sk && sk->sk_state != TCP_TIME_WAIT) {
+       if (sk && sk_fullsock(sk)) {
                read_lock_bh(&sk->sk_callback_lock);
                if (sk->sk_socket && sk->sk_socket->file) {
                        struct file *file = sk->sk_socket->file;
@@ -998,11 +998,13 @@ static int seq_show(struct seq_file *s, void *v)
 {
        const struct nfulnl_instance *inst = v;
 
-       return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
-                         inst->group_num,
-                         inst->peer_portid, inst->qlen,
-                         inst->copy_mode, inst->copy_range,
-                         inst->flushtimeout, atomic_read(&inst->use));
+       seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
+                  inst->group_num,
+                  inst->peer_portid, inst->qlen,
+                  inst->copy_mode, inst->copy_range,
+                  inst->flushtimeout, atomic_read(&inst->use));
+
+       return 0;
 }
 
 static const struct seq_operations nful_seq_ops = {
index 0db8515e76da1f0a293a42078019fccc3bb9a295..6e74655a8d4f153818243e27750c2660e0e52380 100644 (file)
@@ -257,7 +257,7 @@ static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
 {
        const struct cred *cred;
 
-       if (sk->sk_state == TCP_TIME_WAIT)
+       if (!sk_fullsock(sk))
                return 0;
 
        read_lock_bh(&sk->sk_callback_lock);
@@ -314,13 +314,13 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        if (entskb->tstamp.tv64)
                size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 
-       if (entry->hook <= NF_INET_FORWARD ||
-          (entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
+       if (entry->state.hook <= NF_INET_FORWARD ||
+          (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
                csum_verify = !skb_csum_unnecessary(entskb);
        else
                csum_verify = false;
 
-       outdev = entry->outdev;
+       outdev = entry->state.out;
 
        switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
        case NFQNL_COPY_META:
@@ -368,23 +368,23 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                return NULL;
        }
        nfmsg = nlmsg_data(nlh);
-       nfmsg->nfgen_family = entry->pf;
+       nfmsg->nfgen_family = entry->state.pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);
 
        nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
        pmsg = nla_data(nla);
        pmsg->hw_protocol       = entskb->protocol;
-       pmsg->hook              = entry->hook;
+       pmsg->hook              = entry->state.hook;
        *packet_id_ptr          = &pmsg->packet_id;
 
-       indev = entry->indev;
+       indev = entry->state.in;
        if (indev) {
 #if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
                if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
                        goto nla_put_failure;
 #else
-               if (entry->pf == PF_BRIDGE) {
+               if (entry->state.pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
@@ -414,7 +414,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
                        goto nla_put_failure;
 #else
-               if (entry->pf == PF_BRIDGE) {
+               if (entry->state.pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
@@ -633,8 +633,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
        struct nfqnl_instance *queue;
        struct sk_buff *skb, *segs;
        int err = -ENOBUFS;
-       struct net *net = dev_net(entry->indev ?
-                                 entry->indev : entry->outdev);
+       struct net *net = dev_net(entry->state.in ?
+                                 entry->state.in : entry->state.out);
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 
        /* rcu_read_lock()ed by nf_hook_slow() */
@@ -647,7 +647,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 
        skb = entry->skb;
 
-       switch (entry->pf) {
+       switch (entry->state.pf) {
        case NFPROTO_IPV4:
                skb->protocol = htons(ETH_P_IP);
                break;
@@ -757,11 +757,11 @@ nfqnl_set_mode(struct nfqnl_instance *queue,
 static int
 dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
 {
-       if (entry->indev)
-               if (entry->indev->ifindex == ifindex)
+       if (entry->state.in)
+               if (entry->state.in->ifindex == ifindex)
                        return 1;
-       if (entry->outdev)
-               if (entry->outdev->ifindex == ifindex)
+       if (entry->state.out)
+               if (entry->state.out->ifindex == ifindex)
                        return 1;
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
index 65f3e2b6be44031448d85323ccf17cddaffd5557..589b8487cd0840de2bafd01304242265b16ea11f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_arp/arp_tables.h>
 #include <net/netfilter/nf_tables.h>
 
 static int nft_compat_chain_validate_dependency(const char *tablename,
@@ -42,6 +43,7 @@ union nft_entry {
        struct ipt_entry e4;
        struct ip6t_entry e6;
        struct ebt_entry ebt;
+       struct arpt_entry arp;
 };
 
 static inline void
@@ -143,6 +145,8 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
                entry->ebt.ethproto = (__force __be16)proto;
                entry->ebt.invflags = inv ? EBT_IPROTO : 0;
                break;
+       case NFPROTO_ARP:
+               break;
        }
        par->entryinfo  = entry;
        par->target     = target;
@@ -357,6 +361,8 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
                entry->ebt.ethproto = (__force __be16)proto;
                entry->ebt.invflags = inv ? EBT_IPROTO : 0;
                break;
+       case NFPROTO_ARP:
+               break;
        }
        par->entryinfo  = entry;
        par->match      = match;
@@ -543,6 +549,9 @@ nfnl_compat_get(struct sock *nfnl, struct sk_buff *skb,
        case NFPROTO_BRIDGE:
                fmt = "ebt_%s";
                break;
+       case NFPROTO_ARP:
+               fmt = "arpt_%s";
+               break;
        default:
                pr_err("nft_compat: unsupported protocol %d\n",
                        nfmsg->nfgen_family);
index 37c15e6748841053df56fe092a49ced6fb06b077..c7e1a9d7d46f515c9ef80f67d8fffe630ddafb01 100644 (file)
 /* We target a hash table size of 4, element hint is 75% of final size */
 #define NFT_HASH_ELEMENT_HINT 3
 
+struct nft_hash {
+       struct rhashtable               ht;
+};
+
 struct nft_hash_elem {
        struct rhash_head               node;
-       struct nft_data                 key;
-       struct nft_data                 data[];
+       struct nft_set_ext              ext;
 };
 
-static bool nft_hash_lookup(const struct nft_set *set,
-                           const struct nft_data *key,
-                           struct nft_data *data)
-{
-       struct rhashtable *priv = nft_set_priv(set);
-       const struct nft_hash_elem *he;
+struct nft_hash_cmp_arg {
+       const struct nft_set            *set;
+       const struct nft_data           *key;
+       u8                              genmask;
+};
 
-       he = rhashtable_lookup(priv, key);
-       if (he && set->flags & NFT_SET_MAP)
-               nft_data_copy(data, he->data);
+static const struct rhashtable_params nft_hash_params;
 
-       return !!he;
+static inline u32 nft_hash_key(const void *data, u32 len, u32 seed)
+{
+       const struct nft_hash_cmp_arg *arg = data;
+
+       return jhash(arg->key, len, seed);
 }
 
-static int nft_hash_insert(const struct nft_set *set,
-                          const struct nft_set_elem *elem)
+static inline u32 nft_hash_obj(const void *data, u32 len, u32 seed)
 {
-       struct rhashtable *priv = nft_set_priv(set);
-       struct nft_hash_elem *he;
-       unsigned int size;
+       const struct nft_hash_elem *he = data;
 
-       if (elem->flags != 0)
-               return -EINVAL;
+       return jhash(nft_set_ext_key(&he->ext), len, seed);
+}
 
-       size = sizeof(*he);
-       if (set->flags & NFT_SET_MAP)
-               size += sizeof(he->data[0]);
+static inline int nft_hash_cmp(struct rhashtable_compare_arg *arg,
+                              const void *ptr)
+{
+       const struct nft_hash_cmp_arg *x = arg->key;
+       const struct nft_hash_elem *he = ptr;
 
-       he = kzalloc(size, GFP_KERNEL);
-       if (he == NULL)
-               return -ENOMEM;
+       if (nft_data_cmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
+               return 1;
+       if (!nft_set_elem_active(&he->ext, x->genmask))
+               return 1;
+       return 0;
+}
 
-       nft_data_copy(&he->key, &elem->key);
-       if (set->flags & NFT_SET_MAP)
-               nft_data_copy(he->data, &elem->data);
+static bool nft_hash_lookup(const struct nft_set *set,
+                           const struct nft_data *key,
+                           const struct nft_set_ext **ext)
+{
+       struct nft_hash *priv = nft_set_priv(set);
+       const struct nft_hash_elem *he;
+       struct nft_hash_cmp_arg arg = {
+               .genmask = nft_genmask_cur(read_pnet(&set->pnet)),
+               .set     = set,
+               .key     = key,
+       };
 
-       rhashtable_insert(priv, &he->node);
+       he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
+       if (he != NULL)
+               *ext = &he->ext;
 
-       return 0;
+       return !!he;
 }
 
-static void nft_hash_elem_destroy(const struct nft_set *set,
-                                 struct nft_hash_elem *he)
+static int nft_hash_insert(const struct nft_set *set,
+                          const struct nft_set_elem *elem)
 {
-       nft_data_uninit(&he->key, NFT_DATA_VALUE);
-       if (set->flags & NFT_SET_MAP)
-               nft_data_uninit(he->data, set->dtype);
-       kfree(he);
+       struct nft_hash *priv = nft_set_priv(set);
+       struct nft_hash_elem *he = elem->priv;
+       struct nft_hash_cmp_arg arg = {
+               .genmask = nft_genmask_next(read_pnet(&set->pnet)),
+               .set     = set,
+               .key     = &elem->key,
+       };
+
+       return rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
+                                           nft_hash_params);
 }
 
-static void nft_hash_remove(const struct nft_set *set,
-                           const struct nft_set_elem *elem)
+static void nft_hash_activate(const struct nft_set *set,
+                             const struct nft_set_elem *elem)
 {
-       struct rhashtable *priv = nft_set_priv(set);
+       struct nft_hash_elem *he = elem->priv;
 
-       rhashtable_remove(priv, elem->cookie);
-       synchronize_rcu();
-       kfree(elem->cookie);
+       nft_set_elem_change_active(set, &he->ext);
 }
 
-struct nft_compare_arg {
-       const struct nft_set *set;
-       struct nft_set_elem *elem;
-};
-
-static bool nft_hash_compare(void *ptr, void *arg)
+static void *nft_hash_deactivate(const struct nft_set *set,
+                                const struct nft_set_elem *elem)
 {
-       struct nft_hash_elem *he = ptr;
-       struct nft_compare_arg *x = arg;
-
-       if (!nft_data_cmp(&he->key, &x->elem->key, x->set->klen)) {
-               x->elem->cookie = he;
-               x->elem->flags = 0;
-               if (x->set->flags & NFT_SET_MAP)
-                       nft_data_copy(&x->elem->data, he->data);
+       struct nft_hash *priv = nft_set_priv(set);
+       struct nft_hash_elem *he;
+       struct nft_hash_cmp_arg arg = {
+               .genmask = nft_genmask_next(read_pnet(&set->pnet)),
+               .set     = set,
+               .key     = &elem->key,
+       };
 
-               return true;
-       }
+       he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
+       if (he != NULL)
+               nft_set_elem_change_active(set, &he->ext);
 
-       return false;
+       return he;
 }
 
-static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
+static void nft_hash_remove(const struct nft_set *set,
+                           const struct nft_set_elem *elem)
 {
-       struct rhashtable *priv = nft_set_priv(set);
-       struct nft_compare_arg arg = {
-               .set = set,
-               .elem = elem,
-       };
+       struct nft_hash *priv = nft_set_priv(set);
+       struct nft_hash_elem *he = elem->priv;
 
-       if (rhashtable_lookup_compare(priv, &elem->key,
-                                     &nft_hash_compare, &arg))
-               return 0;
-
-       return -ENOENT;
+       rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
 }
 
 static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
                          struct nft_set_iter *iter)
 {
-       struct rhashtable *priv = nft_set_priv(set);
-       const struct nft_hash_elem *he;
+       struct nft_hash *priv = nft_set_priv(set);
+       struct nft_hash_elem *he;
        struct rhashtable_iter hti;
        struct nft_set_elem elem;
+       u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
        int err;
 
-       err = rhashtable_walk_init(priv, &hti);
+       err = rhashtable_walk_init(&priv->ht, &hti);
        iter->err = err;
        if (err)
                return;
@@ -159,11 +170,10 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
 
                if (iter->count < iter->skip)
                        goto cont;
+               if (!nft_set_elem_active(&he->ext, genmask))
+                       goto cont;
 
-               memcpy(&elem.key, &he->key, sizeof(elem.key));
-               if (set->flags & NFT_SET_MAP)
-                       memcpy(&elem.data, he->data, sizeof(elem.data));
-               elem.flags = 0;
+               elem.priv = he;
 
                iter->err = iter->fn(ctx, set, iter, &elem);
                if (iter->err < 0)
@@ -180,45 +190,41 @@ out:
 
 static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
 {
-       return sizeof(struct rhashtable);
+       return sizeof(struct nft_hash);
 }
 
+static const struct rhashtable_params nft_hash_params = {
+       .head_offset            = offsetof(struct nft_hash_elem, node),
+       .hashfn                 = nft_hash_key,
+       .obj_hashfn             = nft_hash_obj,
+       .obj_cmpfn              = nft_hash_cmp,
+       .automatic_shrinking    = true,
+};
+
 static int nft_hash_init(const struct nft_set *set,
                         const struct nft_set_desc *desc,
                         const struct nlattr * const tb[])
 {
-       struct rhashtable *priv = nft_set_priv(set);
-       struct rhashtable_params params = {
-               .nelem_hint = desc->size ? : NFT_HASH_ELEMENT_HINT,
-               .head_offset = offsetof(struct nft_hash_elem, node),
-               .key_offset = offsetof(struct nft_hash_elem, key),
-               .key_len = set->klen,
-               .hashfn = jhash,
-       };
+       struct nft_hash *priv = nft_set_priv(set);
+       struct rhashtable_params params = nft_hash_params;
 
-       return rhashtable_init(priv, &params);
+       params.nelem_hint = desc->size ?: NFT_HASH_ELEMENT_HINT;
+       params.key_len    = set->klen;
+
+       return rhashtable_init(&priv->ht, &params);
 }
 
-static void nft_hash_destroy(const struct nft_set *set)
+static void nft_hash_elem_destroy(void *ptr, void *arg)
 {
-       struct rhashtable *priv = nft_set_priv(set);
-       const struct bucket_table *tbl;
-       struct nft_hash_elem *he;
-       struct rhash_head *pos, *next;
-       unsigned int i;
-
-       /* Stop an eventual async resizing */
-       priv->being_destroyed = true;
-       mutex_lock(&priv->mutex);
+       nft_set_elem_destroy((const struct nft_set *)arg, ptr);
+}
 
-       tbl = rht_dereference(priv->tbl, priv);
-       for (i = 0; i < tbl->size; i++) {
-               rht_for_each_entry_safe(he, pos, next, tbl, i, node)
-                       nft_hash_elem_destroy(set, he);
-       }
-       mutex_unlock(&priv->mutex);
+static void nft_hash_destroy(const struct nft_set *set)
+{
+       struct nft_hash *priv = nft_set_priv(set);
 
-       rhashtable_destroy(priv);
+       rhashtable_free_and_destroy(&priv->ht, nft_hash_elem_destroy,
+                                   (void *)set);
 }
 
 static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
@@ -227,11 +233,8 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
        unsigned int esize;
 
        esize = sizeof(struct nft_hash_elem);
-       if (features & NFT_SET_MAP)
-               esize += FIELD_SIZEOF(struct nft_hash_elem, data[0]);
-
        if (desc->size) {
-               est->size = sizeof(struct rhashtable) +
+               est->size = sizeof(struct nft_hash) +
                            roundup_pow_of_two(desc->size * 4 / 3) *
                            sizeof(struct nft_hash_elem *) +
                            desc->size * esize;
@@ -251,11 +254,13 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
 
 static struct nft_set_ops nft_hash_ops __read_mostly = {
        .privsize       = nft_hash_privsize,
+       .elemsize       = offsetof(struct nft_hash_elem, ext),
        .estimate       = nft_hash_estimate,
        .init           = nft_hash_init,
        .destroy        = nft_hash_destroy,
-       .get            = nft_hash_get,
        .insert         = nft_hash_insert,
+       .activate       = nft_hash_activate,
+       .deactivate     = nft_hash_deactivate,
        .remove         = nft_hash_remove,
        .lookup         = nft_hash_lookup,
        .walk           = nft_hash_walk,
index bde05f28cf14782b3b9f11905fbed37ea8644b95..e18af9db2f04ec0a58d4ed563af13f7d253b8f0a 100644 (file)
@@ -78,7 +78,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
                        li->u.log.level =
                                ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));
                } else {
-                       li->u.log.level = 4;
+                       li->u.log.level = LOGLEVEL_WARNING;
                }
                if (tb[NFTA_LOG_FLAGS] != NULL) {
                        li->u.log.logflags =
index 9615b8b9fb37dcf769207537f0545dd2a08c62d6..a5f30b8760eab5aa476f0afc13fe0e8686c9ff47 100644 (file)
@@ -31,9 +31,13 @@ static void nft_lookup_eval(const struct nft_expr *expr,
 {
        const struct nft_lookup *priv = nft_expr_priv(expr);
        const struct nft_set *set = priv->set;
+       const struct nft_set_ext *ext;
 
-       if (set->ops->lookup(set, &data[priv->sreg], &data[priv->dreg]))
+       if (set->ops->lookup(set, &data[priv->sreg], &ext)) {
+               if (set->flags & NFT_SET_MAP)
+                       nft_data_copy(&data[priv->dreg], nft_set_ext_data(ext));
                return;
+       }
        data[NFT_REG_VERDICT].verdict = NFT_BREAK;
 }
 
index e99911eda91594a6b0f9ea9bcce0aa01b9fc334a..5197874372ec4a2055a3f9251f3a3ec248f53fbb 100644 (file)
@@ -83,7 +83,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                *(u16 *)dest->data = out->type;
                break;
        case NFT_META_SKUID:
-               if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+               if (skb->sk == NULL || !sk_fullsock(skb->sk))
                        goto err;
 
                read_lock_bh(&skb->sk->sk_callback_lock);
@@ -99,7 +99,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                read_unlock_bh(&skb->sk->sk_callback_lock);
                break;
        case NFT_META_SKGID:
-               if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+               if (skb->sk == NULL || !sk_fullsock(skb->sk))
                        goto err;
 
                read_lock_bh(&skb->sk->sk_callback_lock);
@@ -153,7 +153,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                }
                break;
        case NFT_META_CPU:
-               dest->data[0] = smp_processor_id();
+               dest->data[0] = raw_smp_processor_id();
                break;
        case NFT_META_IIFGROUP:
                if (in == NULL)
index 46214f245665a0f70138cbd025bb9d1917a7e32f..42d0ca45fb9e9e64daf3799aad420f205c5586e0 100644 (file)
@@ -26,25 +26,26 @@ struct nft_rbtree {
 
 struct nft_rbtree_elem {
        struct rb_node          node;
-       u16                     flags;
-       struct nft_data         key;
-       struct nft_data         data[];
+       struct nft_set_ext      ext;
 };
 
+
 static bool nft_rbtree_lookup(const struct nft_set *set,
                              const struct nft_data *key,
-                             struct nft_data *data)
+                             const struct nft_set_ext **ext)
 {
        const struct nft_rbtree *priv = nft_set_priv(set);
        const struct nft_rbtree_elem *rbe, *interval = NULL;
-       const struct rb_node *parent = priv->root.rb_node;
+       const struct rb_node *parent;
+       u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
        int d;
 
        spin_lock_bh(&nft_rbtree_lock);
+       parent = priv->root.rb_node;
        while (parent != NULL) {
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
 
-               d = nft_data_cmp(&rbe->key, key, set->klen);
+               d = nft_data_cmp(nft_set_ext_key(&rbe->ext), key, set->klen);
                if (d < 0) {
                        parent = parent->rb_left;
                        interval = rbe;
@@ -52,12 +53,17 @@ static bool nft_rbtree_lookup(const struct nft_set *set,
                        parent = parent->rb_right;
                else {
 found:
-                       if (rbe->flags & NFT_SET_ELEM_INTERVAL_END)
+                       if (!nft_set_elem_active(&rbe->ext, genmask)) {
+                               parent = parent->rb_left;
+                               continue;
+                       }
+                       if (nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
+                           *nft_set_ext_flags(&rbe->ext) &
+                           NFT_SET_ELEM_INTERVAL_END)
                                goto out;
-                       if (set->flags & NFT_SET_MAP)
-                               nft_data_copy(data, rbe->data);
-
                        spin_unlock_bh(&nft_rbtree_lock);
+
+                       *ext = &rbe->ext;
                        return true;
                }
        }
@@ -71,23 +77,13 @@ out:
        return false;
 }
 
-static void nft_rbtree_elem_destroy(const struct nft_set *set,
-                                   struct nft_rbtree_elem *rbe)
-{
-       nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
-       if (set->flags & NFT_SET_MAP &&
-           !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
-               nft_data_uninit(rbe->data, set->dtype);
-
-       kfree(rbe);
-}
-
 static int __nft_rbtree_insert(const struct nft_set *set,
                               struct nft_rbtree_elem *new)
 {
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe;
        struct rb_node *parent, **p;
+       u8 genmask = nft_genmask_next(read_pnet(&set->pnet));
        int d;
 
        parent = NULL;
@@ -95,13 +91,18 @@ static int __nft_rbtree_insert(const struct nft_set *set,
        while (*p != NULL) {
                parent = *p;
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
-               d = nft_data_cmp(&rbe->key, &new->key, set->klen);
+               d = nft_data_cmp(nft_set_ext_key(&rbe->ext),
+                                nft_set_ext_key(&new->ext),
+                                set->klen);
                if (d < 0)
                        p = &parent->rb_left;
                else if (d > 0)
                        p = &parent->rb_right;
-               else
-                       return -EEXIST;
+               else {
+                       if (nft_set_elem_active(&rbe->ext, genmask))
+                               return -EEXIST;
+                       p = &parent->rb_left;
+               }
        }
        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, &priv->root);
@@ -111,31 +112,13 @@ static int __nft_rbtree_insert(const struct nft_set *set,
 static int nft_rbtree_insert(const struct nft_set *set,
                             const struct nft_set_elem *elem)
 {
-       struct nft_rbtree_elem *rbe;
-       unsigned int size;
+       struct nft_rbtree_elem *rbe = elem->priv;
        int err;
 
-       size = sizeof(*rbe);
-       if (set->flags & NFT_SET_MAP &&
-           !(elem->flags & NFT_SET_ELEM_INTERVAL_END))
-               size += sizeof(rbe->data[0]);
-
-       rbe = kzalloc(size, GFP_KERNEL);
-       if (rbe == NULL)
-               return -ENOMEM;
-
-       rbe->flags = elem->flags;
-       nft_data_copy(&rbe->key, &elem->key);
-       if (set->flags & NFT_SET_MAP &&
-           !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
-               nft_data_copy(rbe->data, &elem->data);
-
        spin_lock_bh(&nft_rbtree_lock);
        err = __nft_rbtree_insert(set, rbe);
-       if (err < 0)
-               kfree(rbe);
-
        spin_unlock_bh(&nft_rbtree_lock);
+
        return err;
 }
 
@@ -143,42 +126,49 @@ static void nft_rbtree_remove(const struct nft_set *set,
                              const struct nft_set_elem *elem)
 {
        struct nft_rbtree *priv = nft_set_priv(set);
-       struct nft_rbtree_elem *rbe = elem->cookie;
+       struct nft_rbtree_elem *rbe = elem->priv;
 
        spin_lock_bh(&nft_rbtree_lock);
        rb_erase(&rbe->node, &priv->root);
        spin_unlock_bh(&nft_rbtree_lock);
-       kfree(rbe);
 }
 
-static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
+static void nft_rbtree_activate(const struct nft_set *set,
+                               const struct nft_set_elem *elem)
+{
+       struct nft_rbtree_elem *rbe = elem->priv;
+
+       nft_set_elem_change_active(set, &rbe->ext);
+}
+
+static void *nft_rbtree_deactivate(const struct nft_set *set,
+                                  const struct nft_set_elem *elem)
 {
        const struct nft_rbtree *priv = nft_set_priv(set);
        const struct rb_node *parent = priv->root.rb_node;
        struct nft_rbtree_elem *rbe;
+       u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
        int d;
 
-       spin_lock_bh(&nft_rbtree_lock);
        while (parent != NULL) {
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
 
-               d = nft_data_cmp(&rbe->key, &elem->key, set->klen);
+               d = nft_data_cmp(nft_set_ext_key(&rbe->ext), &elem->key,
+                                set->klen);
                if (d < 0)
                        parent = parent->rb_left;
                else if (d > 0)
                        parent = parent->rb_right;
                else {
-                       elem->cookie = rbe;
-                       if (set->flags & NFT_SET_MAP &&
-                           !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
-                               nft_data_copy(&elem->data, rbe->data);
-                       elem->flags = rbe->flags;
-                       spin_unlock_bh(&nft_rbtree_lock);
-                       return 0;
+                       if (!nft_set_elem_active(&rbe->ext, genmask)) {
+                               parent = parent->rb_left;
+                               continue;
+                       }
+                       nft_set_elem_change_active(set, &rbe->ext);
+                       return rbe;
                }
        }
-       spin_unlock_bh(&nft_rbtree_lock);
-       return -ENOENT;
+       return NULL;
 }
 
 static void nft_rbtree_walk(const struct nft_ctx *ctx,
@@ -186,21 +176,21 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
                            struct nft_set_iter *iter)
 {
        const struct nft_rbtree *priv = nft_set_priv(set);
-       const struct nft_rbtree_elem *rbe;
+       struct nft_rbtree_elem *rbe;
        struct nft_set_elem elem;
        struct rb_node *node;
+       u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
 
        spin_lock_bh(&nft_rbtree_lock);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+               rbe = rb_entry(node, struct nft_rbtree_elem, node);
+
                if (iter->count < iter->skip)
                        goto cont;
+               if (!nft_set_elem_active(&rbe->ext, genmask))
+                       goto cont;
 
-               rbe = rb_entry(node, struct nft_rbtree_elem, node);
-               nft_data_copy(&elem.key, &rbe->key);
-               if (set->flags & NFT_SET_MAP &&
-                   !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
-                       nft_data_copy(&elem.data, rbe->data);
-               elem.flags = rbe->flags;
+               elem.priv = rbe;
 
                iter->err = iter->fn(ctx, set, iter, &elem);
                if (iter->err < 0) {
@@ -237,7 +227,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
        while ((node = priv->root.rb_node) != NULL) {
                rb_erase(node, &priv->root);
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
-               nft_rbtree_elem_destroy(set, rbe);
+               nft_set_elem_destroy(set, rbe);
        }
 }
 
@@ -247,9 +237,6 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
        unsigned int nsize;
 
        nsize = sizeof(struct nft_rbtree_elem);
-       if (features & NFT_SET_MAP)
-               nsize += FIELD_SIZEOF(struct nft_rbtree_elem, data[0]);
-
        if (desc->size)
                est->size = sizeof(struct nft_rbtree) + desc->size * nsize;
        else
@@ -262,12 +249,14 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
 
 static struct nft_set_ops nft_rbtree_ops __read_mostly = {
        .privsize       = nft_rbtree_privsize,
+       .elemsize       = offsetof(struct nft_rbtree_elem, ext),
        .estimate       = nft_rbtree_estimate,
        .init           = nft_rbtree_init,
        .destroy        = nft_rbtree_destroy,
        .insert         = nft_rbtree_insert,
        .remove         = nft_rbtree_remove,
-       .get            = nft_rbtree_get,
+       .deactivate     = nft_rbtree_deactivate,
+       .activate       = nft_rbtree_activate,
        .lookup         = nft_rbtree_lookup,
        .walk           = nft_rbtree_walk,
        .features       = NFT_SET_INTERVAL | NFT_SET_MAP,
index 7b5f9d58680ad0ebca03c13e4a6f43037283bdc1..92877114aff4634b99c0c14d74d7043487bf31cc 100644 (file)
@@ -28,14 +28,16 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
        case NFPROTO_IPV4:
                switch (priv->type) {
                case NFT_REJECT_ICMP_UNREACH:
-                       nf_send_unreach(pkt->skb, priv->icmp_code);
+                       nf_send_unreach(pkt->skb, priv->icmp_code,
+                                       pkt->ops->hooknum);
                        break;
                case NFT_REJECT_TCP_RST:
                        nf_send_reset(pkt->skb, pkt->ops->hooknum);
                        break;
                case NFT_REJECT_ICMPX_UNREACH:
                        nf_send_unreach(pkt->skb,
-                                       nft_reject_icmp_code(priv->icmp_code));
+                                       nft_reject_icmp_code(priv->icmp_code),
+                                       pkt->ops->hooknum);
                        break;
                }
                break;
index 50e1e5aaf4ce82ff7bbf1ee7171aaa51d54eefd1..c205b26a2beea67d4eec9a0d22d18f97c73edd8d 100644 (file)
@@ -42,15 +42,21 @@ enum nf_tproxy_lookup_t {
 
 static bool tproxy_sk_is_transparent(struct sock *sk)
 {
-       if (sk->sk_state != TCP_TIME_WAIT) {
-               if (inet_sk(sk)->transparent)
-                       return true;
-               sock_put(sk);
-       } else {
+       switch (sk->sk_state) {
+       case TCP_TIME_WAIT:
                if (inet_twsk(sk)->tw_transparent)
                        return true;
-               inet_twsk_put(inet_twsk(sk));
+               break;
+       case TCP_NEW_SYN_RECV:
+               if (inet_rsk(inet_reqsk(sk))->no_srccheck)
+                       return true;
+               break;
+       default:
+               if (inet_sk(sk)->transparent)
+                       return true;
        }
+
+       sock_gen_put(sk);
        return false;
 }
 
index f440f57a452fd9650d73f0a13b644383793463f5..50a52043650fd95989eb6618a36cbb8dba0f6b18 100644 (file)
@@ -56,8 +56,7 @@ physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
        /* This only makes sense in the FORWARD and POSTROUTING chains */
        if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
-           (!!(nf_bridge->mask & BRNF_BRIDGED) ^
-           !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
+           (!!nf_bridge->physoutdev ^ !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
                return false;
 
        if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
index 0d47afea968240623ae4486d56c9a87fe2b7b121..89045982ec9468e01c81f6d86f50d508981e4593 100644 (file)
@@ -193,7 +193,7 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
                return ret;
 
        if (!match_counter0(opt.ext.packets, &info->packets))
-               return 0;
+               return false;
        return match_counter0(opt.ext.bytes, &info->bytes);
 }
 
@@ -239,7 +239,7 @@ set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
                return ret;
 
        if (!match_counter(opt.ext.packets, &info->packets))
-               return 0;
+               return false;
        return match_counter(opt.ext.bytes, &info->bytes);
 }
 
index 13332dbf291d6e530b77c3c8a7d155a07788ebc3..895534e87a47a5bb3c4452f93a019831c43754ca 100644 (file)
@@ -129,6 +129,20 @@ xt_socket_get_sock_v4(struct net *net, const u8 protocol,
        return NULL;
 }
 
+static bool xt_socket_sk_is_transparent(struct sock *sk)
+{
+       switch (sk->sk_state) {
+       case TCP_TIME_WAIT:
+               return inet_twsk(sk)->tw_transparent;
+
+       case TCP_NEW_SYN_RECV:
+               return inet_rsk(inet_reqsk(sk))->no_srccheck;
+
+       default:
+               return inet_sk(sk)->transparent;
+       }
+}
+
 static bool
 socket_match(const struct sk_buff *skb, struct xt_action_param *par,
             const struct xt_socket_mtinfo1 *info)
@@ -195,16 +209,14 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                 * unless XT_SOCKET_NOWILDCARD is set
                 */
                wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) &&
-                           sk->sk_state != TCP_TIME_WAIT &&
+                           sk_fullsock(sk) &&
                            inet_sk(sk)->inet_rcv_saddr == 0);
 
                /* Ignore non-transparent sockets,
-                  if XT_SOCKET_TRANSPARENT is used */
+                * if XT_SOCKET_TRANSPARENT is used
+                */
                if (info->flags & XT_SOCKET_TRANSPARENT)
-                       transparent = ((sk->sk_state != TCP_TIME_WAIT &&
-                                       inet_sk(sk)->transparent) ||
-                                      (sk->sk_state == TCP_TIME_WAIT &&
-                                       inet_twsk(sk)->tw_transparent));
+                       transparent = xt_socket_sk_is_transparent(sk);
 
                if (sk != skb->sk)
                        sock_gen_put(sk);
@@ -363,16 +375,14 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
                 * unless XT_SOCKET_NOWILDCARD is set
                 */
                wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) &&
-                           sk->sk_state != TCP_TIME_WAIT &&
+                           sk_fullsock(sk) &&
                            ipv6_addr_any(&sk->sk_v6_rcv_saddr));
 
                /* Ignore non-transparent sockets,
-                  if XT_SOCKET_TRANSPARENT is used */
+                * if XT_SOCKET_TRANSPARENT is used
+                */
                if (info->flags & XT_SOCKET_TRANSPARENT)
-                       transparent = ((sk->sk_state != TCP_TIME_WAIT &&
-                                       inet_sk(sk)->transparent) ||
-                                      (sk->sk_state == TCP_TIME_WAIT &&
-                                       inet_twsk(sk)->tw_transparent));
+                       transparent = xt_socket_sk_is_transparent(sk);
 
                if (sk != skb->sk)
                        sock_gen_put(sk);
index 5699adb976521450c542c17acd13c91c6454750f..0bc3460319c8bd565cc11564180d566f3f89488e 100644 (file)
@@ -26,13 +26,12 @@ static bool
 string_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
        const struct xt_string_info *conf = par->matchinfo;
-       struct ts_state state;
        bool invert;
 
        invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT;
 
        return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
-                            conf->to_offset, conf->config, &state)
+                            conf->to_offset, conf->config)
                             != UINT_MAX) ^ invert;
 }
 
index 70440748fe5c439979e26dec5728f832d44859d6..13f777f20995bbe02edbce36a9c04f2518fe0355 100644 (file)
@@ -293,15 +293,13 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
                                return -ENOMEM;
 
                        addr_struct.s_addr = iter4->addr;
-                       ret_val = nla_put(skb, NLBL_MGMT_A_IPV4ADDR,
-                                         sizeof(struct in_addr),
-                                         &addr_struct);
+                       ret_val = nla_put_in_addr(skb, NLBL_MGMT_A_IPV4ADDR,
+                                                 addr_struct.s_addr);
                        if (ret_val != 0)
                                return ret_val;
                        addr_struct.s_addr = iter4->mask;
-                       ret_val = nla_put(skb, NLBL_MGMT_A_IPV4MASK,
-                                         sizeof(struct in_addr),
-                                         &addr_struct);
+                       ret_val = nla_put_in_addr(skb, NLBL_MGMT_A_IPV4MASK,
+                                                 addr_struct.s_addr);
                        if (ret_val != 0)
                                return ret_val;
                        map4 = netlbl_domhsh_addr4_entry(iter4);
@@ -328,14 +326,12 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
                        if (nla_b == NULL)
                                return -ENOMEM;
 
-                       ret_val = nla_put(skb, NLBL_MGMT_A_IPV6ADDR,
-                                         sizeof(struct in6_addr),
-                                         &iter6->addr);
+                       ret_val = nla_put_in6_addr(skb, NLBL_MGMT_A_IPV6ADDR,
+                                                  &iter6->addr);
                        if (ret_val != 0)
                                return ret_val;
-                       ret_val = nla_put(skb, NLBL_MGMT_A_IPV6MASK,
-                                         sizeof(struct in6_addr),
-                                         &iter6->mask);
+                       ret_val = nla_put_in6_addr(skb, NLBL_MGMT_A_IPV6MASK,
+                                                  &iter6->mask);
                        if (ret_val != 0)
                                return ret_val;
                        map6 = netlbl_domhsh_addr6_entry(iter6);
index aec7994f78cf8df09fab84817c9bbee4ec77daaa..b0380927f05f973a59228860e6cfae86187ab639 100644 (file)
@@ -1117,34 +1117,30 @@ static int netlbl_unlabel_staticlist_gen(u32 cmd,
                struct in_addr addr_struct;
 
                addr_struct.s_addr = addr4->list.addr;
-               ret_val = nla_put(cb_arg->skb,
-                                 NLBL_UNLABEL_A_IPV4ADDR,
-                                 sizeof(struct in_addr),
-                                 &addr_struct);
+               ret_val = nla_put_in_addr(cb_arg->skb,
+                                         NLBL_UNLABEL_A_IPV4ADDR,
+                                         addr_struct.s_addr);
                if (ret_val != 0)
                        goto list_cb_failure;
 
                addr_struct.s_addr = addr4->list.mask;
-               ret_val = nla_put(cb_arg->skb,
-                                 NLBL_UNLABEL_A_IPV4MASK,
-                                 sizeof(struct in_addr),
-                                 &addr_struct);
+               ret_val = nla_put_in_addr(cb_arg->skb,
+                                         NLBL_UNLABEL_A_IPV4MASK,
+                                         addr_struct.s_addr);
                if (ret_val != 0)
                        goto list_cb_failure;
 
                secid = addr4->secid;
        } else {
-               ret_val = nla_put(cb_arg->skb,
-                                 NLBL_UNLABEL_A_IPV6ADDR,
-                                 sizeof(struct in6_addr),
-                                 &addr6->list.addr);
+               ret_val = nla_put_in6_addr(cb_arg->skb,
+                                          NLBL_UNLABEL_A_IPV6ADDR,
+                                          &addr6->list.addr);
                if (ret_val != 0)
                        goto list_cb_failure;
 
-               ret_val = nla_put(cb_arg->skb,
-                                 NLBL_UNLABEL_A_IPV6MASK,
-                                 sizeof(struct in6_addr),
-                                 &addr6->list.mask);
+               ret_val = nla_put_in6_addr(cb_arg->skb,
+                                          NLBL_UNLABEL_A_IPV6MASK,
+                                          &addr6->list.mask);
                if (ret_val != 0)
                        goto list_cb_failure;
 
index 05919bf3f670ed1267e01f14c1de61e78e4d80c4..19909d0786a2e60574ba623d343193ca212ed2be 100644 (file)
@@ -116,6 +116,8 @@ static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 static DEFINE_SPINLOCK(netlink_tap_lock);
 static struct list_head netlink_tap_all __read_mostly;
 
+static const struct rhashtable_params netlink_rhashtable_params;
+
 static inline u32 netlink_group_mask(u32 group)
 {
        return group ? 1 << (group - 1) : 0;
@@ -970,41 +972,50 @@ netlink_unlock_table(void)
 
 struct netlink_compare_arg
 {
-       struct net *net;
+       possible_net_t pnet;
        u32 portid;
 };
 
-static bool netlink_compare(void *ptr, void *arg)
+/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
+#define netlink_compare_arg_len \
+       (offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
+
+static inline int netlink_compare(struct rhashtable_compare_arg *arg,
+                                 const void *ptr)
 {
-       struct netlink_compare_arg *x = arg;
-       struct sock *sk = ptr;
+       const struct netlink_compare_arg *x = arg->key;
+       const struct netlink_sock *nlk = ptr;
 
-       return nlk_sk(sk)->portid == x->portid &&
-              net_eq(sock_net(sk), x->net);
+       return nlk->portid != x->portid ||
+              !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
+}
+
+static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
+                                    struct net *net, u32 portid)
+{
+       memset(arg, 0, sizeof(*arg));
+       write_pnet(&arg->pnet, net);
+       arg->portid = portid;
 }
 
 static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
                                     struct net *net)
 {
-       struct netlink_compare_arg arg = {
-               .net = net,
-               .portid = portid,
-       };
+       struct netlink_compare_arg arg;
 
-       return rhashtable_lookup_compare(&table->hash, &portid,
-                                        &netlink_compare, &arg);
+       netlink_compare_arg_init(&arg, net, portid);
+       return rhashtable_lookup_fast(&table->hash, &arg,
+                                     netlink_rhashtable_params);
 }
 
-static bool __netlink_insert(struct netlink_table *table, struct sock *sk)
+static int __netlink_insert(struct netlink_table *table, struct sock *sk)
 {
-       struct netlink_compare_arg arg = {
-               .net = sock_net(sk),
-               .portid = nlk_sk(sk)->portid,
-       };
+       struct netlink_compare_arg arg;
 
-       return rhashtable_lookup_compare_insert(&table->hash,
-                                               &nlk_sk(sk)->node,
-                                               &netlink_compare, &arg);
+       netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
+       return rhashtable_lookup_insert_key(&table->hash, &arg,
+                                           &nlk_sk(sk)->node,
+                                           netlink_rhashtable_params);
 }
 
 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
@@ -1066,9 +1077,10 @@ static int netlink_insert(struct sock *sk, u32 portid)
        nlk_sk(sk)->portid = portid;
        sock_hold(sk);
 
-       err = 0;
-       if (!__netlink_insert(table, sk)) {
-               err = -EADDRINUSE;
+       err = __netlink_insert(table, sk);
+       if (err) {
+               if (err == -EEXIST)
+                       err = -EADDRINUSE;
                sock_put(sk);
        }
 
@@ -1082,7 +1094,8 @@ static void netlink_remove(struct sock *sk)
        struct netlink_table *table;
 
        table = &nl_table[sk->sk_protocol];
-       if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
+       if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
+                                   netlink_rhashtable_params)) {
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
@@ -2256,8 +2269,7 @@ static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
        put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
 }
 
-static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
-                          struct msghdr *msg, size_t len)
+static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
@@ -2346,8 +2358,7 @@ out:
        return err;
 }
 
-static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
-                          struct msghdr *msg, size_t len,
+static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                           int flags)
 {
        struct scm_cookie scm;
@@ -3116,17 +3127,28 @@ static struct pernet_operations __net_initdata netlink_net_ops = {
        .exit = netlink_net_exit,
 };
 
+static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
+{
+       const struct netlink_sock *nlk = data;
+       struct netlink_compare_arg arg;
+
+       netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
+       return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
+}
+
+static const struct rhashtable_params netlink_rhashtable_params = {
+       .head_offset = offsetof(struct netlink_sock, node),
+       .key_len = netlink_compare_arg_len,
+       .obj_hashfn = netlink_hash,
+       .obj_cmpfn = netlink_compare,
+       .max_size = 65536,
+       .automatic_shrinking = true,
+};
+
 static int __init netlink_proto_init(void)
 {
        int i;
        int err = proto_register(&netlink_proto, 0);
-       struct rhashtable_params ht_params = {
-               .head_offset = offsetof(struct netlink_sock, node),
-               .key_offset = offsetof(struct netlink_sock, portid),
-               .key_len = sizeof(u32), /* portid */
-               .hashfn = jhash,
-               .max_shift = 16, /* 64K */
-       };
 
        if (err != 0)
                goto out;
@@ -3138,7 +3160,8 @@ static int __init netlink_proto_init(void)
                goto panic;
 
        for (i = 0; i < MAX_LINKS; i++) {
-               if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) {
+               if (rhashtable_init(&nl_table[i].hash,
+                                   &netlink_rhashtable_params) < 0) {
                        while (--i > 0)
                                rhashtable_destroy(&nl_table[i].hash);
                        kfree(nl_table);
index 69f1d5e9959f23646e82686cf5d17e1685c34c4f..b987fd56c3c52935d85a0f8f710814a7f9f5798d 100644 (file)
@@ -1023,8 +1023,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
        return 1;
 }
 
-static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
-                     struct msghdr *msg, size_t len)
+static int nr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct nr_sock *nr = nr_sk(sk);
@@ -1133,8 +1132,8 @@ out:
        return err;
 }
 
-static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
-                     struct msghdr *msg, size_t size, int flags)
+static int nr_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                     int flags)
 {
        struct sock *sk = sock->sk;
        DECLARE_SOCKADDR(struct sockaddr_ax25 *, sax, msg->msg_name);
index 6ae063cebf7de71082f0b526ecbfcd935f70dff4..988f542481a835c394549666d5244be8b48e9faa 100644 (file)
@@ -65,36 +65,6 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
        return 1;
 }
 
-#ifdef CONFIG_INET
-
-static int nr_rebuild_header(struct sk_buff *skb)
-{
-       unsigned char *bp = skb->data;
-
-       if (arp_find(bp + 7, skb))
-               return 1;
-
-       bp[6] &= ~AX25_CBIT;
-       bp[6] &= ~AX25_EBIT;
-       bp[6] |= AX25_SSSID_SPARE;
-       bp    += AX25_ADDR_LEN;
-
-       bp[6] &= ~AX25_CBIT;
-       bp[6] |= AX25_EBIT;
-       bp[6] |= AX25_SSSID_SPARE;
-
-       return 0;
-}
-
-#else
-
-static int nr_rebuild_header(struct sk_buff *skb)
-{
-       return 1;
-}
-
-#endif
-
 static int nr_header(struct sk_buff *skb, struct net_device *dev,
                     unsigned short type,
                     const void *daddr, const void *saddr, unsigned int len)
@@ -188,7 +158,6 @@ static netdev_tx_t nr_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static const struct header_ops nr_header_ops = {
        .create = nr_header,
-       .rebuild= nr_rebuild_header,
 };
 
 static const struct net_device_ops nr_netdev_ops = {
index e181e290427cd9727d59ddb1989d2974937f1754..9578bd6a4f3e2efd565c4626a718f8ecc6f967c4 100644 (file)
@@ -750,8 +750,8 @@ error:
        return ret;
 }
 
-static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-                            struct msghdr *msg, size_t len)
+static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+                            size_t len)
 {
        struct sock *sk = sock->sk;
        struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -793,8 +793,8 @@ static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        return nfc_llcp_send_i_frame(llcp_sock, msg, len);
 }
 
-static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-                            struct msghdr *msg, size_t len, int flags)
+static int llcp_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+                            size_t len, int flags)
 {
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
index 373e138c0ab6687686c084ac6daad55af7b1dc72..82b4e8024778019b80f6b59ba9a4242d015d4b2f 100644 (file)
@@ -211,8 +211,7 @@ static void rawsock_tx_work(struct work_struct *work)
        }
 }
 
-static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
-                          struct msghdr *msg, size_t len)
+static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct nfc_dev *dev = nfc_rawsock(sk)->dev;
@@ -248,8 +247,8 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
        return len;
 }
 
-static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
-                          struct msghdr *msg, size_t len, int flags)
+static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                          int flags)
 {
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
index b7d818c594234bae63e2b603cc2ee3afe10fa1c2..ed6b0f8dd1bbdfa0876c3425b24c83cf4aa315a6 100644 (file)
@@ -6,6 +6,7 @@ config OPENVSWITCH
        tristate "Open vSwitch"
        depends on INET
        select LIBCRC32C
+       select MPLS
        select NET_MPLS_GSO
        ---help---
          Open vSwitch is a multilayer Ethernet switch targeted at virtualized
index 5bae7243c5777e38df7be95454b8164724c769cf..096c6276e6b92680542ed1204bf396470c08caf5 100644 (file)
@@ -203,7 +203,6 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
 
        ovs_flow_tbl_destroy(&dp->table);
        free_percpu(dp->stats_percpu);
-       release_net(ovs_dp_get_net(dp));
        kfree(dp->ports);
        kfree(dp);
 }
@@ -1501,7 +1500,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
        if (dp == NULL)
                goto err_free_reply;
 
-       ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
+       ovs_dp_set_net(dp, sock_net(skb->sk));
 
        /* Allocate table. */
        err = ovs_flow_tbl_init(&dp->table);
@@ -1575,7 +1574,6 @@ err_destroy_percpu:
 err_destroy_table:
        ovs_flow_tbl_destroy(&dp->table);
 err_free_dp:
-       release_net(ovs_dp_get_net(dp));
        kfree(dp);
 err_free_reply:
        kfree_skb(reply);
index 3ece94563079fb154d437d75e90db23b055d6a0a..4ec4a480b147030c3e2938c4ba612d4e7d5e10be 100644 (file)
@@ -84,10 +84,8 @@ struct datapath {
        /* Stats. */
        struct dp_stats_percpu __percpu *stats_percpu;
 
-#ifdef CONFIG_NET_NS
        /* Network namespace ref. */
-       struct net *net;
-#endif
+       possible_net_t net;
 
        u32 user_features;
 };
index 22b18c145c9221675e031de2617fcdd800405170..c691b1a1eee0ae3aeedf389a3288272417e9bcbb 100644 (file)
@@ -535,11 +535,11 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
                        break;
                case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
                        SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
-                                       nla_get_be32(a), is_mask);
+                                       nla_get_in_addr(a), is_mask);
                        break;
                case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
                        SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
-                                       nla_get_be32(a), is_mask);
+                                       nla_get_in_addr(a), is_mask);
                        break;
                case OVS_TUNNEL_KEY_ATTR_TOS:
                        SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
@@ -648,10 +648,12 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
            nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
                return -EMSGSIZE;
        if (output->ipv4_src &&
-           nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
+           nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
+                           output->ipv4_src))
                return -EMSGSIZE;
        if (output->ipv4_dst &&
-           nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
+           nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
+                           output->ipv4_dst))
                return -EMSGSIZE;
        if (output->ipv4_tos &&
            nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
index f8db7064d81c770cda356633153230eb905ccb39..5102c3cc4eec4ecec6698859935d7769d37a174c 100644 (file)
@@ -216,10 +216,16 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 static void packet_flush_mclist(struct sock *sk);
 
 struct packet_skb_cb {
-       unsigned int origlen;
        union {
                struct sockaddr_pkt pkt;
-               struct sockaddr_ll ll;
+               union {
+                       /* Trick: alias skb original length with
+                        * ll.sll_family and ll.protocol in order
+                        * to save room.
+                        */
+                       unsigned int origlen;
+                       struct sockaddr_ll ll;
+               };
        } sa;
 };
 
@@ -1608,8 +1614,8 @@ oom:
  *     protocol layers and you must therefore supply it with a complete frame
  */
 
-static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
-                              struct msghdr *msg, size_t len)
+static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
+                              size_t len)
 {
        struct sock *sk = sock->sk;
        DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
@@ -1818,13 +1824,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
                skb = nskb;
        }
 
-       BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
-                    sizeof(skb->cb));
+       sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
 
        sll = &PACKET_SKB_CB(skb)->sa.ll;
-       sll->sll_family = AF_PACKET;
        sll->sll_hatype = dev->type;
-       sll->sll_protocol = skb->protocol;
        sll->sll_pkttype = skb->pkt_type;
        if (unlikely(po->origdev))
                sll->sll_ifindex = orig_dev->ifindex;
@@ -1833,7 +1836,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
        sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
 
-       PACKET_SKB_CB(skb)->origlen = skb->len;
+       /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
+        * Use their space for storing the original skb length.
+        */
+       PACKET_SKB_CB(skb)->sa.origlen = skb->len;
 
        if (pskb_trim(skb, snaplen))
                goto drop_n_acct;
@@ -1847,7 +1853,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
        spin_lock(&sk->sk_receive_queue.lock);
        po->stats.stats1.tp_packets++;
-       skb->dropcount = atomic_read(&sk->sk_drops);
+       sock_skb_set_dropcount(sk, skb);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        spin_unlock(&sk->sk_receive_queue.lock);
        sk->sk_data_ready(sk);
@@ -1910,14 +1916,19 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                }
        }
 
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               status |= TP_STATUS_CSUMNOTREADY;
-
        snaplen = skb->len;
 
        res = run_filter(skb, sk, snaplen);
        if (!res)
                goto drop_n_restore;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               status |= TP_STATUS_CSUMNOTREADY;
+       else if (skb->pkt_type != PACKET_OUTGOING &&
+                (skb->ip_summed == CHECKSUM_COMPLETE ||
+                 skb_csum_unnecessary(skb)))
+               status |= TP_STATUS_CSUM_VALID;
+
        if (snaplen > res)
                snaplen = res;
 
@@ -2603,8 +2614,7 @@ out:
        return err;
 }
 
-static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
-               struct msghdr *msg, size_t len)
+static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
@@ -2884,13 +2894,14 @@ out:
  *     If necessary we block.
  */
 
-static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
-                         struct msghdr *msg, size_t len, int flags)
+static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                         int flags)
 {
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, err;
        int vnet_hdr_len = 0;
+       unsigned int origlen = 0;
 
        err = -EINVAL;
        if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
@@ -2990,6 +3001,15 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (err)
                goto out_free;
 
+       if (sock->type != SOCK_PACKET) {
+               struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
+
+               /* Original length was stored in sockaddr_ll fields */
+               origlen = PACKET_SKB_CB(skb)->sa.origlen;
+               sll->sll_family = AF_PACKET;
+               sll->sll_protocol = skb->protocol;
+       }
+
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (msg->msg_name) {
@@ -3001,6 +3021,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
                        msg->msg_namelen = sizeof(struct sockaddr_pkt);
                } else {
                        struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
+
                        msg->msg_namelen = sll->sll_halen +
                                offsetof(struct sockaddr_ll, sll_addr);
                }
@@ -3014,7 +3035,12 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
                aux.tp_status = TP_STATUS_USER;
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        aux.tp_status |= TP_STATUS_CSUMNOTREADY;
-               aux.tp_len = PACKET_SKB_CB(skb)->origlen;
+               else if (skb->pkt_type != PACKET_OUTGOING &&
+                        (skb->ip_summed == CHECKSUM_COMPLETE ||
+                         skb_csum_unnecessary(skb)))
+                       aux.tp_status |= TP_STATUS_CSUM_VALID;
+
+               aux.tp_len = origlen;
                aux.tp_snaplen = skb->len;
                aux.tp_mac = 0;
                aux.tp_net = skb_network_offset(skb);
index cdddf6a303996b1a512a7a3896362acc31635f0e..fe6e20caea1d9bcd3711b3ad29a8de2ae40cd1bc 100644 (file)
@@ -74,9 +74,7 @@ extern struct mutex fanout_mutex;
 #define PACKET_FANOUT_MAX      256
 
 struct packet_fanout {
-#ifdef CONFIG_NET_NS
-       struct net              *net;
-#endif
+       possible_net_t          net;
        unsigned int            num_members;
        u16                     id;
        u8                      type;
index 26054b4b467c63d3bce92b8226ec767ad65ad960..5e710435ffa96943a59184de054553739e70d953 100644 (file)
@@ -83,8 +83,7 @@ static int pn_init(struct sock *sk)
        return 0;
 }
 
-static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
-                       struct msghdr *msg, size_t len)
+static int pn_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        DECLARE_SOCKADDR(struct sockaddr_pn *, target, msg->msg_name);
        struct sk_buff *skb;
@@ -125,9 +124,8 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
        return (err >= 0) ? len : err;
 }
 
-static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
-                       struct msghdr *msg, size_t len, int noblock,
-                       int flags, int *addr_len)
+static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+                     int noblock, int flags, int *addr_len)
 {
        struct sk_buff *skb = NULL;
        struct sockaddr_pn sa;
index 5d3f2b7507d45a9b78ba0fd412cc3a8f57ad9178..6de2aeb98a1fc99fa7b75430f048060fac6c191b 100644 (file)
@@ -1118,8 +1118,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
 
 }
 
-static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
-                       struct msghdr *msg, size_t len)
+static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        struct pep_sock *pn = pep_sk(sk);
        struct sk_buff *skb;
@@ -1246,9 +1245,8 @@ struct sk_buff *pep_read(struct sock *sk)
        return skb;
 }
 
-static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
-                       struct msghdr *msg, size_t len, int noblock,
-                       int flags, int *addr_len)
+static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+                      int noblock, int flags, int *addr_len)
 {
        struct sk_buff *skb;
        int err;
index 008214a3d5eb5b90ecbccb3bea9f532a87f630c9..d575ef4e9aa6d390bde4f78ba85247acf8e9bf65 100644 (file)
@@ -425,15 +425,15 @@ out:
        return err;
 }
 
-static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock,
-                               struct msghdr *m, size_t total_len)
+static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m,
+                            size_t total_len)
 {
        struct sock *sk = sock->sk;
 
        if (pn_socket_autobind(sock))
                return -EAGAIN;
 
-       return sk->sk_prot->sendmsg(iocb, sk, m, total_len);
+       return sk->sk_prot->sendmsg(sk, m, total_len);
 }
 
 const struct proto_ops phonet_dgram_ops = {
index c2a5eef41343c816f70e6dc16fc7a2fa9ea2d684..c3f2855c3d8432272f7899608513a499558d9ad8 100644 (file)
@@ -702,8 +702,8 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
 void rds_inc_put(struct rds_incoming *inc);
 void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
                       struct rds_incoming *inc, gfp_t gfp);
-int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-               size_t size, int msg_flags);
+int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+               int msg_flags);
 void rds_clear_recv_queue(struct rds_sock *rs);
 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
 void rds_inc_info_copy(struct rds_incoming *inc,
@@ -711,8 +711,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
                       __be32 saddr, __be32 daddr, int flip);
 
 /* send.c */
-int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-               size_t payload_len);
+int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
 void rds_send_reset(struct rds_connection *conn);
 int rds_send_xmit(struct rds_connection *conn);
 struct sockaddr_in;
index f9ec1acd801cb182372e38a6f3c26d1dbe535137..a00462b0d01de9ee2793d4fb273bf568b2eefc29 100644 (file)
@@ -395,8 +395,8 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg)
        return 0;
 }
 
-int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-               size_t size, int msg_flags)
+int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+               int msg_flags)
 {
        struct sock *sk = sock->sk;
        struct rds_sock *rs = rds_sk_to_rs(sk);
index 42f65d4305c88dc5db279fec71d5fb844e708cc1..44672befc0ee29a3e04ca01768c087fd0abd2f36 100644 (file)
@@ -920,8 +920,7 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
        return ret;
 }
 
-int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
-               size_t payload_len)
+int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 {
        struct sock *sk = sock->sk;
        struct rds_sock *rs = rds_sk_to_rs(sk);
index 43bac7c4dd9e62b136b97ad2fa99d81dbd39bf11..8ae603069a1a1706982dc0b7affd5443cb912308 100644 (file)
@@ -1046,8 +1046,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
        return 1;
 }
 
-static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *msg, size_t len)
+static int rose_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct rose_sock *rose = rose_sk(sk);
@@ -1211,8 +1210,8 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
 }
 
 
-static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *msg, size_t size, int flags)
+static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                       int flags)
 {
        struct sock *sk = sock->sk;
        struct rose_sock *rose = rose_sk(sk);
index 50005888be573245fc1d30b8f38bc90b10c89698..369ca81a8c5dd262539f686c6d1d5700c8e07971 100644 (file)
@@ -41,6 +41,9 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev,
 {
        unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2);
 
+       if (daddr)
+               memcpy(buff + 7, daddr, dev->addr_len);
+
        *buff++ = ROSE_GFI | ROSE_Q_BIT;
        *buff++ = 0x00;
        *buff++ = ROSE_DATA;
@@ -53,43 +56,6 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev,
        return -37;
 }
 
-static int rose_rebuild_header(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
-       struct net_device *dev = skb->dev;
-       struct net_device_stats *stats = &dev->stats;
-       unsigned char *bp = (unsigned char *)skb->data;
-       struct sk_buff *skbn;
-       unsigned int len;
-
-       if (arp_find(bp + 7, skb)) {
-               return 1;
-       }
-
-       if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
-               kfree_skb(skb);
-               return 1;
-       }
-
-       if (skb->sk != NULL)
-               skb_set_owner_w(skbn, skb->sk);
-
-       kfree_skb(skb);
-
-       len = skbn->len;
-
-       if (!rose_route_frame(skbn, NULL)) {
-               kfree_skb(skbn);
-               stats->tx_errors++;
-               return 1;
-       }
-
-       stats->tx_packets++;
-       stats->tx_bytes += len;
-#endif
-       return 1;
-}
-
 static int rose_set_mac_address(struct net_device *dev, void *addr)
 {
        struct sockaddr *sa = addr;
@@ -134,19 +100,26 @@ static int rose_close(struct net_device *dev)
 static netdev_tx_t rose_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct net_device_stats *stats = &dev->stats;
+       unsigned int len = skb->len;
 
        if (!netif_running(dev)) {
                printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n");
                return NETDEV_TX_BUSY;
        }
-       dev_kfree_skb(skb);
-       stats->tx_errors++;
+
+       if (!rose_route_frame(skb, NULL)) {
+               dev_kfree_skb(skb);
+               stats->tx_errors++;
+               return NETDEV_TX_OK;
+       }
+
+       stats->tx_packets++;
+       stats->tx_bytes += len;
        return NETDEV_TX_OK;
 }
 
 static const struct header_ops rose_header_ops = {
        .create = rose_header,
-       .rebuild = rose_rebuild_header,
 };
 
 static const struct net_device_ops rose_netdev_ops = {
index 7b1670489638e565c7ccea71961f8532986c94cb..0095b9a0b779ca9fcc212f2c495acce399ec1550 100644 (file)
@@ -441,8 +441,7 @@ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
  *   - sends a call data packet
  *   - may send an abort (abort code in control data)
  */
-static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
-                        struct msghdr *m, size_t len)
+static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
 {
        struct rxrpc_transport *trans;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
@@ -482,7 +481,7 @@ static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
        switch (rx->sk.sk_state) {
        case RXRPC_SERVER_LISTENING:
                if (!m->msg_name) {
-                       ret = rxrpc_server_sendmsg(iocb, rx, m, len);
+                       ret = rxrpc_server_sendmsg(rx, m, len);
                        break;
                }
        case RXRPC_SERVER_BOUND:
@@ -492,7 +491,7 @@ static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
                        break;
                }
        case RXRPC_CLIENT_CONNECTED:
-               ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
+               ret = rxrpc_client_sendmsg(rx, trans, m, len);
                break;
        default:
                ret = -ENOTCONN;
index 481f89f93789a147fd5979e894e62145f4d9d767..4505a691d88c283bbbd8038c2dd088825015bd32 100644 (file)
@@ -28,7 +28,7 @@
 const char *rxrpc_pkts[] = {
        "?00",
        "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
-       "?09", "?10", "?11", "?12", "?13", "?14", "?15"
+       "?09", "?10", "?11", "?12", "VERSION", "?14", "?15"
 };
 
 /*
@@ -593,6 +593,20 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
        rxrpc_queue_conn(conn);
 }
 
+/*
+ * post endpoint-level events to the local endpoint
+ * - this includes debug and version messages
+ */
+static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
+                                      struct sk_buff *skb)
+{
+       _enter("%p,%p", local, skb);
+
+       atomic_inc(&local->usage);
+       skb_queue_tail(&local->event_queue, skb);
+       rxrpc_queue_work(&local->event_processor);
+}
+
 static struct rxrpc_connection *rxrpc_conn_from_local(struct rxrpc_local *local,
                                               struct sk_buff *skb,
                                               struct rxrpc_skb_priv *sp)
@@ -699,6 +713,11 @@ void rxrpc_data_ready(struct sock *sk)
                goto bad_message;
        }
 
+       if (sp->hdr.type == RXRPC_PACKET_TYPE_VERSION) {
+               rxrpc_post_packet_to_local(local, skb);
+               goto out;
+       }
+       
        if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
            (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
                goto bad_message;
@@ -731,6 +750,8 @@ void rxrpc_data_ready(struct sock *sk)
                else
                        goto cant_route_call;
        }
+
+out:
        rxrpc_put_local(local);
        return;
 
index ba9fd36d3f156452c470a93a0b04e428672daa35..aef1bd294e1796b68052936bdc8d0b62b52814fe 100644 (file)
@@ -152,11 +152,13 @@ struct rxrpc_local {
        struct work_struct      destroyer;      /* endpoint destroyer */
        struct work_struct      acceptor;       /* incoming call processor */
        struct work_struct      rejecter;       /* packet reject writer */
+       struct work_struct      event_processor; /* endpoint event processor */
        struct list_head        services;       /* services listening on this endpoint */
        struct list_head        link;           /* link in endpoint list */
        struct rw_semaphore     defrag_sem;     /* control re-enablement of IP DF bit */
        struct sk_buff_head     accept_queue;   /* incoming calls awaiting acceptance */
        struct sk_buff_head     reject_queue;   /* packets awaiting rejection */
+       struct sk_buff_head     event_queue;    /* endpoint event packets awaiting processing */
        spinlock_t              lock;           /* access lock */
        rwlock_t                services_lock;  /* lock for services list */
        atomic_t                usage;
@@ -548,10 +550,9 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
 extern unsigned rxrpc_resend_timeout;
 
 int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
-int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
-                        struct rxrpc_transport *, struct msghdr *, size_t);
-int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *, struct msghdr *,
-                        size_t);
+int rxrpc_client_sendmsg(struct rxrpc_sock *, struct rxrpc_transport *,
+                        struct msghdr *, size_t);
+int rxrpc_server_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
 
 /*
  * ar-peer.c
@@ -572,8 +573,7 @@ extern const struct file_operations rxrpc_connection_seq_fops;
  * ar-recvmsg.c
  */
 void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
-int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
-                 int);
+int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
 
 /*
  * ar-security.c
index 87f7135d238b498f9208543cf4608c1cb78a59d3..ca904ed5400a11bd08e47fea56d0caeb30f0a442 100644 (file)
 #include <linux/net.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
+#include <linux/udp.h>
+#include <linux/ip.h>
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
+#include <generated/utsrelease.h>
 #include "ar-internal.h"
 
+static const char rxrpc_version_string[65] = "linux-" UTS_RELEASE " AF_RXRPC";
+
 static LIST_HEAD(rxrpc_locals);
 DEFINE_RWLOCK(rxrpc_local_lock);
 static DECLARE_RWSEM(rxrpc_local_sem);
 static DECLARE_WAIT_QUEUE_HEAD(rxrpc_local_wq);
 
 static void rxrpc_destroy_local(struct work_struct *work);
+static void rxrpc_process_local_events(struct work_struct *work);
 
 /*
  * allocate a new local
@@ -37,11 +43,13 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
                INIT_WORK(&local->destroyer, &rxrpc_destroy_local);
                INIT_WORK(&local->acceptor, &rxrpc_accept_incoming_calls);
                INIT_WORK(&local->rejecter, &rxrpc_reject_packets);
+               INIT_WORK(&local->event_processor, &rxrpc_process_local_events);
                INIT_LIST_HEAD(&local->services);
                INIT_LIST_HEAD(&local->link);
                init_rwsem(&local->defrag_sem);
                skb_queue_head_init(&local->accept_queue);
                skb_queue_head_init(&local->reject_queue);
+               skb_queue_head_init(&local->event_queue);
                spin_lock_init(&local->lock);
                rwlock_init(&local->services_lock);
                atomic_set(&local->usage, 1);
@@ -264,10 +272,12 @@ static void rxrpc_destroy_local(struct work_struct *work)
        ASSERT(list_empty(&local->services));
        ASSERT(!work_pending(&local->acceptor));
        ASSERT(!work_pending(&local->rejecter));
+       ASSERT(!work_pending(&local->event_processor));
 
        /* finish cleaning up the local descriptor */
        rxrpc_purge_queue(&local->accept_queue);
        rxrpc_purge_queue(&local->reject_queue);
+       rxrpc_purge_queue(&local->event_queue);
        kernel_sock_shutdown(local->socket, SHUT_RDWR);
        sock_release(local->socket);
 
@@ -308,3 +318,91 @@ void __exit rxrpc_destroy_all_locals(void)
 
        _leave("");
 }
+
+/*
+ * Reply to a version request
+ */
+static void rxrpc_send_version_request(struct rxrpc_local *local,
+                                      struct rxrpc_header *hdr,
+                                      struct sk_buff *skb)
+{
+       struct sockaddr_in sin;
+       struct msghdr msg;
+       struct kvec iov[2];
+       size_t len;
+       int ret;
+
+       _enter("");
+
+       sin.sin_family = AF_INET;
+       sin.sin_port = udp_hdr(skb)->source;
+       sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+
+       msg.msg_name    = &sin;
+       msg.msg_namelen = sizeof(sin);
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_flags   = 0;
+
+       hdr->seq        = 0;
+       hdr->serial     = 0;
+       hdr->type       = RXRPC_PACKET_TYPE_VERSION;
+       hdr->flags      = RXRPC_LAST_PACKET | (~hdr->flags & RXRPC_CLIENT_INITIATED);
+       hdr->userStatus = 0;
+       hdr->_rsvd      = 0;
+
+       iov[0].iov_base = hdr;
+       iov[0].iov_len  = sizeof(*hdr);
+       iov[1].iov_base = (char *)rxrpc_version_string;
+       iov[1].iov_len  = sizeof(rxrpc_version_string);
+
+       len = iov[0].iov_len + iov[1].iov_len;
+
+       _proto("Tx VERSION (reply)");
+
+       ret = kernel_sendmsg(local->socket, &msg, iov, 2, len);
+       if (ret < 0)
+               _debug("sendmsg failed: %d", ret);
+
+       _leave("");
+}
+
+/*
+ * Process event packets targetted at a local endpoint.
+ */
+static void rxrpc_process_local_events(struct work_struct *work)
+{
+       struct rxrpc_local *local = container_of(work, struct rxrpc_local, event_processor);
+       struct sk_buff *skb;
+       char v;
+
+       _enter("");
+
+       atomic_inc(&local->usage);
+       
+       while ((skb = skb_dequeue(&local->event_queue))) {
+               struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+               kdebug("{%d},{%u}", local->debug_id, sp->hdr.type);
+
+               switch (sp->hdr.type) {
+               case RXRPC_PACKET_TYPE_VERSION:
+                       if (skb_copy_bits(skb, 0, &v, 1) < 0)
+                               return;
+                       _proto("Rx VERSION { %02x }", v);
+                       if (v == 0)
+                               rxrpc_send_version_request(local, &sp->hdr, skb);
+                       break;
+
+               default:
+                       /* Just ignore anything we don't understand */
+                       break;
+               }
+
+               rxrpc_put_local(local);
+               rxrpc_free_skb(skb);
+       }
+
+       rxrpc_put_local(local);
+       _leave("");
+}
index 8331c95e152283d437b3dee9205cf37c9ce06271..c0042807bfc6a5e2b6e03d70fbcffe097be73326 100644 (file)
@@ -23,8 +23,7 @@
  */
 unsigned rxrpc_resend_timeout = 4 * HZ;
 
-static int rxrpc_send_data(struct kiocb *iocb,
-                          struct rxrpc_sock *rx,
+static int rxrpc_send_data(struct rxrpc_sock *rx,
                           struct rxrpc_call *call,
                           struct msghdr *msg, size_t len);
 
@@ -129,9 +128,8 @@ static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
  * - caller holds the socket locked
  * - the socket may be either a client socket or a server socket
  */
-int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
-                        struct rxrpc_transport *trans, struct msghdr *msg,
-                        size_t len)
+int rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans,
+                        struct msghdr *msg, size_t len)
 {
        struct rxrpc_conn_bundle *bundle;
        enum rxrpc_command cmd;
@@ -191,7 +189,7 @@ int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
                /* request phase complete for this client call */
                ret = -EPROTO;
        } else {
-               ret = rxrpc_send_data(iocb, rx, call, msg, len);
+               ret = rxrpc_send_data(rx, call, msg, len);
        }
 
        rxrpc_put_call(call);
@@ -232,7 +230,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
                   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
                ret = -EPROTO; /* request phase complete for this client call */
        } else {
-               ret = rxrpc_send_data(NULL, call->socket, call, msg, len);
+               ret = rxrpc_send_data(call->socket, call, msg, len);
        }
 
        release_sock(&call->socket->sk);
@@ -271,8 +269,7 @@ EXPORT_SYMBOL(rxrpc_kernel_abort_call);
  * send a message through a server socket
  * - caller holds the socket locked
  */
-int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
-                        struct msghdr *msg, size_t len)
+int rxrpc_server_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 {
        enum rxrpc_command cmd;
        struct rxrpc_call *call;
@@ -313,7 +310,7 @@ int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
                        break;
                }
 
-               ret = rxrpc_send_data(iocb, rx, call, msg, len);
+               ret = rxrpc_send_data(rx, call, msg, len);
                break;
 
        case RXRPC_CMD_SEND_ABORT:
@@ -520,8 +517,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
  * - must be called in process context
  * - caller holds the socket locked
  */
-static int rxrpc_send_data(struct kiocb *iocb,
-                          struct rxrpc_sock *rx,
+static int rxrpc_send_data(struct rxrpc_sock *rx,
                           struct rxrpc_call *call,
                           struct msghdr *msg, size_t len)
 {
@@ -546,11 +542,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
        call->tx_pending = NULL;
 
        copied = 0;
-       if (len > iov_iter_count(&msg->msg_iter))
-               len = iov_iter_count(&msg->msg_iter);
-       while (len) {
-               int copy;
-
+       do {
                if (!skb) {
                        size_t size, chunk, max, space;
 
@@ -572,8 +564,8 @@ static int rxrpc_send_data(struct kiocb *iocb,
                        max &= ~(call->conn->size_align - 1UL);
 
                        chunk = max;
-                       if (chunk > len && !more)
-                               chunk = len;
+                       if (chunk > msg_data_left(msg) && !more)
+                               chunk = msg_data_left(msg);
 
                        space = chunk + call->conn->size_align;
                        space &= ~(call->conn->size_align - 1UL);
@@ -616,23 +608,23 @@ static int rxrpc_send_data(struct kiocb *iocb,
                sp = rxrpc_skb(skb);
 
                /* append next segment of data to the current buffer */
-               copy = skb_tailroom(skb);
-               ASSERTCMP(copy, >, 0);
-               if (copy > len)
-                       copy = len;
-               if (copy > sp->remain)
-                       copy = sp->remain;
-
-               _debug("add");
-               ret = skb_add_data(skb, &msg->msg_iter, copy);
-               _debug("added");
-               if (ret < 0)
-                       goto efault;
-               sp->remain -= copy;
-               skb->mark += copy;
-               copied += copy;
-
-               len -= copy;
+               if (msg_data_left(msg) > 0) {
+                       int copy = skb_tailroom(skb);
+                       ASSERTCMP(copy, >, 0);
+                       if (copy > msg_data_left(msg))
+                               copy = msg_data_left(msg);
+                       if (copy > sp->remain)
+                               copy = sp->remain;
+
+                       _debug("add");
+                       ret = skb_add_data(skb, &msg->msg_iter, copy);
+                       _debug("added");
+                       if (ret < 0)
+                               goto efault;
+                       sp->remain -= copy;
+                       skb->mark += copy;
+                       copied += copy;
+               }
 
                /* check for the far side aborting the call or a network error
                 * occurring */
@@ -640,7 +632,8 @@ static int rxrpc_send_data(struct kiocb *iocb,
                        goto call_aborted;
 
                /* add the packet to the send queue if it's now full */
-               if (sp->remain <= 0 || (!len && !more)) {
+               if (sp->remain <= 0 ||
+                   (msg_data_left(msg) == 0 && !more)) {
                        struct rxrpc_connection *conn = call->conn;
                        uint32_t seq;
                        size_t pad;
@@ -670,7 +663,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
                        sp->hdr.serviceId = conn->service_id;
 
                        sp->hdr.flags = conn->out_clientflag;
-                       if (len == 0 && !more)
+                       if (msg_data_left(msg) == 0 && !more)
                                sp->hdr.flags |= RXRPC_LAST_PACKET;
                        else if (CIRC_SPACE(call->acks_head, call->acks_tail,
                                            call->acks_winsz) > 1)
@@ -686,10 +679,10 @@ static int rxrpc_send_data(struct kiocb *iocb,
 
                        memcpy(skb->head, &sp->hdr,
                               sizeof(struct rxrpc_header));
-                       rxrpc_queue_packet(call, skb, !iov_iter_count(&msg->msg_iter) && !more);
+                       rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more);
                        skb = NULL;
                }
-       }
+       } while (msg_data_left(msg) > 0);
 
 success:
        ret = copied;
index 19a560626dc4f4232592e7d34381a859a04b6f4c..b92beded7459403910b2208ba4dd19a3a752c05c 100644 (file)
@@ -43,8 +43,8 @@ void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call)
  * - we need to be careful about two or more threads calling recvmsg
  *   simultaneously
  */
-int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
-                 struct msghdr *msg, size_t len, int flags)
+int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                 int flags)
 {
        struct rxrpc_skb_priv *sp;
        struct rxrpc_call *call = NULL, *continue_call = NULL;
@@ -150,7 +150,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
                                       &call->conn->trans->peer->srx, len);
                                msg->msg_namelen = len;
                        }
-                       sock_recv_ts_and_drops(msg, &rx->sk, skb);
+                       sock_recv_timestamp(msg, &rx->sk, skb);
                }
 
                /* receive the message */
index 5f6288fa3f1247462897cd747364dfbc9e0da843..4d2cede1746842e8dcc0b7267638241755870112 100644 (file)
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 #include <linux/filter.h>
+#include <linux/bpf.h>
+
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
 #include <linux/tc_act/tc_bpf.h>
 #include <net/tc_act/tc_bpf.h>
 
-#define BPF_TAB_MASK     15
+#define BPF_TAB_MASK           15
+#define ACT_BPF_NAME_LEN       256
+
+struct tcf_bpf_cfg {
+       struct bpf_prog *filter;
+       struct sock_filter *bpf_ops;
+       char *bpf_name;
+       u32 bpf_fd;
+       u16 bpf_num_ops;
+};
 
-static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a,
+static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
                   struct tcf_result *res)
 {
-       struct tcf_bpf *b = a->priv;
+       struct tcf_bpf *prog = act->priv;
        int action, filter_res;
 
-       spin_lock(&b->tcf_lock);
+       spin_lock(&prog->tcf_lock);
 
-       b->tcf_tm.lastuse = jiffies;
-       bstats_update(&b->tcf_bstats, skb);
+       prog->tcf_tm.lastuse = jiffies;
+       bstats_update(&prog->tcf_bstats, skb);
 
-       filter_res = BPF_PROG_RUN(b->filter, skb);
+       /* Needed here for accessing maps. */
+       rcu_read_lock();
+       filter_res = BPF_PROG_RUN(prog->filter, skb);
+       rcu_read_unlock();
 
        /* A BPF program may overwrite the default action opcode.
         * Similarly as in cls_bpf, if filter_res == -1 we use the
@@ -52,52 +66,87 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a,
                break;
        case TC_ACT_SHOT:
                action = filter_res;
-               b->tcf_qstats.drops++;
+               prog->tcf_qstats.drops++;
                break;
        case TC_ACT_UNSPEC:
-               action = b->tcf_action;
+               action = prog->tcf_action;
                break;
        default:
                action = TC_ACT_UNSPEC;
                break;
        }
 
-       spin_unlock(&b->tcf_lock);
+       spin_unlock(&prog->tcf_lock);
        return action;
 }
 
-static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *a,
+static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
+{
+       return !prog->bpf_ops;
+}
+
+static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
+                                struct sk_buff *skb)
+{
+       struct nlattr *nla;
+
+       if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
+               return -EMSGSIZE;
+
+       nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
+                         sizeof(struct sock_filter));
+       if (nla == NULL)
+               return -EMSGSIZE;
+
+       memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
+
+       return 0;
+}
+
+static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
+                                 struct sk_buff *skb)
+{
+       if (nla_put_u32(skb, TCA_ACT_BPF_FD, prog->bpf_fd))
+               return -EMSGSIZE;
+
+       if (prog->bpf_name &&
+           nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
                        int bind, int ref)
 {
        unsigned char *tp = skb_tail_pointer(skb);
-       struct tcf_bpf *b = a->priv;
+       struct tcf_bpf *prog = act->priv;
        struct tc_act_bpf opt = {
-               .index    = b->tcf_index,
-               .refcnt   = b->tcf_refcnt - ref,
-               .bindcnt  = b->tcf_bindcnt - bind,
-               .action   = b->tcf_action,
+               .index   = prog->tcf_index,
+               .refcnt  = prog->tcf_refcnt - ref,
+               .bindcnt = prog->tcf_bindcnt - bind,
+               .action  = prog->tcf_action,
        };
-       struct tcf_t t;
-       struct nlattr *nla;
+       struct tcf_t tm;
+       int ret;
 
        if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
 
-       if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, b->bpf_num_ops))
-               goto nla_put_failure;
-
-       nla = nla_reserve(skb, TCA_ACT_BPF_OPS, b->bpf_num_ops *
-                         sizeof(struct sock_filter));
-       if (!nla)
+       if (tcf_bpf_is_ebpf(prog))
+               ret = tcf_bpf_dump_ebpf_info(prog, skb);
+       else
+               ret = tcf_bpf_dump_bpf_info(prog, skb);
+       if (ret)
                goto nla_put_failure;
 
-       memcpy(nla_data(nla), b->bpf_ops, nla_len(nla));
+       tm.install = jiffies_to_clock_t(jiffies - prog->tcf_tm.install);
+       tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse);
+       tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires);
 
-       t.install = jiffies_to_clock_t(jiffies - b->tcf_tm.install);
-       t.lastuse = jiffies_to_clock_t(jiffies - b->tcf_tm.lastuse);
-       t.expires = jiffies_to_clock_t(b->tcf_tm.expires);
-       if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(t), &t))
+       if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm))
                goto nla_put_failure;
+
        return skb->len;
 
 nla_put_failure:
@@ -107,36 +156,21 @@ nla_put_failure:
 
 static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
        [TCA_ACT_BPF_PARMS]     = { .len = sizeof(struct tc_act_bpf) },
+       [TCA_ACT_BPF_FD]        = { .type = NLA_U32 },
+       [TCA_ACT_BPF_NAME]      = { .type = NLA_NUL_STRING, .len = ACT_BPF_NAME_LEN },
        [TCA_ACT_BPF_OPS_LEN]   = { .type = NLA_U16 },
        [TCA_ACT_BPF_OPS]       = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
 };
 
-static int tcf_bpf_init(struct net *net, struct nlattr *nla,
-                       struct nlattr *est, struct tc_action *a,
-                       int ovr, int bind)
+static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
 {
-       struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
-       struct tc_act_bpf *parm;
-       struct tcf_bpf *b;
-       u16 bpf_size, bpf_num_ops;
        struct sock_filter *bpf_ops;
-       struct sock_fprog_kern tmp;
+       struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
+       u16 bpf_size, bpf_num_ops;
        int ret;
 
-       if (!nla)
-               return -EINVAL;
-
-       ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy);
-       if (ret < 0)
-               return ret;
-
-       if (!tb[TCA_ACT_BPF_PARMS] ||
-           !tb[TCA_ACT_BPF_OPS_LEN] || !tb[TCA_ACT_BPF_OPS])
-               return -EINVAL;
-       parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
-
        bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
        if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
                return -EINVAL;
@@ -146,68 +180,165 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                return -EINVAL;
 
        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
-       if (!bpf_ops)
+       if (bpf_ops == NULL)
                return -ENOMEM;
 
        memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size);
 
-       tmp.len = bpf_num_ops;
-       tmp.filter = bpf_ops;
+       fprog_tmp.len = bpf_num_ops;
+       fprog_tmp.filter = bpf_ops;
 
-       ret = bpf_prog_create(&fp, &tmp);
-       if (ret)
-               goto free_bpf_ops;
+       ret = bpf_prog_create(&fp, &fprog_tmp);
+       if (ret < 0) {
+               kfree(bpf_ops);
+               return ret;
+       }
 
-       if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*b), bind);
-               if (ret)
+       cfg->bpf_ops = bpf_ops;
+       cfg->bpf_num_ops = bpf_num_ops;
+       cfg->filter = fp;
+
+       return 0;
+}
+
+static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
+{
+       struct bpf_prog *fp;
+       char *name = NULL;
+       u32 bpf_fd;
+
+       bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);
+
+       fp = bpf_prog_get(bpf_fd);
+       if (IS_ERR(fp))
+               return PTR_ERR(fp);
+
+       if (fp->type != BPF_PROG_TYPE_SCHED_ACT) {
+               bpf_prog_put(fp);
+               return -EINVAL;
+       }
+
+       if (tb[TCA_ACT_BPF_NAME]) {
+               name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]),
+                              nla_len(tb[TCA_ACT_BPF_NAME]),
+                              GFP_KERNEL);
+               if (!name) {
+                       bpf_prog_put(fp);
+                       return -ENOMEM;
+               }
+       }
+
+       cfg->bpf_fd = bpf_fd;
+       cfg->bpf_name = name;
+       cfg->filter = fp;
+
+       return 0;
+}
+
+static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+                       struct nlattr *est, struct tc_action *act,
+                       int replace, int bind)
+{
+       struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+       struct tc_act_bpf *parm;
+       struct tcf_bpf *prog;
+       struct tcf_bpf_cfg cfg;
+       bool is_bpf, is_ebpf;
+       int ret;
+
+       if (!nla)
+               return -EINVAL;
+
+       ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy);
+       if (ret < 0)
+               return ret;
+
+       is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
+       is_ebpf = tb[TCA_ACT_BPF_FD];
+
+       if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
+           !tb[TCA_ACT_BPF_PARMS])
+               return -EINVAL;
+
+       parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
+
+       memset(&cfg, 0, sizeof(cfg));
+
+       ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
+                      tcf_bpf_init_from_efd(tb, &cfg);
+       if (ret < 0)
+               return ret;
+
+       if (!tcf_hash_check(parm->index, act, bind)) {
+               ret = tcf_hash_create(parm->index, est, act,
+                                     sizeof(*prog), bind);
+               if (ret < 0)
                        goto destroy_fp;
 
                ret = ACT_P_CREATED;
        } else {
+               /* Don't override defaults. */
                if (bind)
                        goto destroy_fp;
-               tcf_hash_release(a, bind);
-               if (!ovr) {
+
+               tcf_hash_release(act, bind);
+               if (!replace) {
                        ret = -EEXIST;
                        goto destroy_fp;
                }
        }
 
-       b = to_bpf(a);
-       spin_lock_bh(&b->tcf_lock);
-       b->tcf_action = parm->action;
-       b->bpf_num_ops = bpf_num_ops;
-       b->bpf_ops = bpf_ops;
-       b->filter = fp;
-       spin_unlock_bh(&b->tcf_lock);
+       prog = to_bpf(act);
+       spin_lock_bh(&prog->tcf_lock);
+
+       prog->bpf_ops = cfg.bpf_ops;
+       prog->bpf_name = cfg.bpf_name;
+
+       if (cfg.bpf_num_ops)
+               prog->bpf_num_ops = cfg.bpf_num_ops;
+       if (cfg.bpf_fd)
+               prog->bpf_fd = cfg.bpf_fd;
+
+       prog->tcf_action = parm->action;
+       prog->filter = cfg.filter;
+
+       spin_unlock_bh(&prog->tcf_lock);
 
        if (ret == ACT_P_CREATED)
-               tcf_hash_insert(a);
+               tcf_hash_insert(act);
+
        return ret;
 
 destroy_fp:
-       bpf_prog_destroy(fp);
-free_bpf_ops:
-       kfree(bpf_ops);
+       if (is_ebpf)
+               bpf_prog_put(cfg.filter);
+       else
+               bpf_prog_destroy(cfg.filter);
+
+       kfree(cfg.bpf_ops);
+       kfree(cfg.bpf_name);
+
        return ret;
 }
 
-static void tcf_bpf_cleanup(struct tc_action *a, int bind)
+static void tcf_bpf_cleanup(struct tc_action *act, int bind)
 {
-       struct tcf_bpf *b = a->priv;
+       const struct tcf_bpf *prog = act->priv;
 
-       bpf_prog_destroy(b->filter);
+       if (tcf_bpf_is_ebpf(prog))
+               bpf_prog_put(prog->filter);
+       else
+               bpf_prog_destroy(prog->filter);
 }
 
-static struct tc_action_ops act_bpf_ops = {
-       .kind =         "bpf",
-       .type =         TCA_ACT_BPF,
-       .owner        THIS_MODULE,
-       .act =          tcf_bpf,
-       .dump =         tcf_bpf_dump,
-       .cleanup =      tcf_bpf_cleanup,
-       .init =         tcf_bpf_init,
+static struct tc_action_ops act_bpf_ops __read_mostly = {
+       .kind           =       "bpf",
+       .type           =       TCA_ACT_BPF,
+       .owner          =       THIS_MODULE,
+       .act            =       tcf_bpf,
+       .dump           =       tcf_bpf_dump,
+       .cleanup        =       tcf_bpf_cleanup,
+       .init           =       tcf_bpf_init,
 };
 
 static int __init bpf_init_module(void)
index baef987fe2c036ae61f7108455ce1d828ec40e6c..8b0470e418dc6e9475464768d629969087e66b37 100644 (file)
@@ -286,7 +286,7 @@ replay:
                        RCU_INIT_POINTER(*back, next);
 
                        tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
-                       tcf_destroy(tp);
+                       tcf_destroy(tp, true);
                        err = 0;
                        goto errout;
                }
@@ -301,14 +301,20 @@ replay:
                        err = -EEXIST;
                        if (n->nlmsg_flags & NLM_F_EXCL) {
                                if (tp_created)
-                                       tcf_destroy(tp);
+                                       tcf_destroy(tp, true);
                                goto errout;
                        }
                        break;
                case RTM_DELTFILTER:
                        err = tp->ops->delete(tp, fh);
-                       if (err == 0)
+                       if (err == 0) {
                                tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
+                               if (tcf_destroy(tp, false)) {
+                                       struct tcf_proto *next = rtnl_dereference(tp->next);
+
+                                       RCU_INIT_POINTER(*back, next);
+                               }
+                       }
                        goto errout;
                case RTM_GETTFILTER:
                        err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
@@ -329,7 +335,7 @@ replay:
                tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
        } else {
                if (tp_created)
-                       tcf_destroy(tp);
+                       tcf_destroy(tp, true);
        }
 
 errout:
index fc399db86f11b17cb05536df8f211fe79c7a3232..0b8c3ace671f1fff47cf2a12f7e6428bb5704b9f 100644 (file)
@@ -96,11 +96,14 @@ static void basic_delete_filter(struct rcu_head *head)
        kfree(f);
 }
 
-static void basic_destroy(struct tcf_proto *tp)
+static bool basic_destroy(struct tcf_proto *tp, bool force)
 {
        struct basic_head *head = rtnl_dereference(tp->root);
        struct basic_filter *f, *n;
 
+       if (!force && !list_empty(&head->flist))
+               return false;
+
        list_for_each_entry_safe(f, n, &head->flist, link) {
                list_del_rcu(&f->link);
                tcf_unbind_filter(tp, &f->res);
@@ -108,6 +111,7 @@ static void basic_destroy(struct tcf_proto *tp)
        }
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
+       return true;
 }
 
 static int basic_delete(struct tcf_proto *tp, unsigned long arg)
index 5f3ee9e4b5bf539e97f9195ceb904dc419634302..5c4171c5d2bd367188344186f424782bc8baba63 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/types.h>
 #include <linux/skbuff.h>
 #include <linux/filter.h>
+#include <linux/bpf.h>
+
 #include <net/rtnetlink.h>
 #include <net/pkt_cls.h>
 #include <net/sock.h>
@@ -24,6 +26,8 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
 MODULE_DESCRIPTION("TC BPF based classifier");
 
+#define CLS_BPF_NAME_LEN       256
+
 struct cls_bpf_head {
        struct list_head plist;
        u32 hgen;
@@ -32,18 +36,24 @@ struct cls_bpf_head {
 
 struct cls_bpf_prog {
        struct bpf_prog *filter;
-       struct sock_filter *bpf_ops;
-       struct tcf_exts exts;
-       struct tcf_result res;
        struct list_head link;
+       struct tcf_result res;
+       struct tcf_exts exts;
        u32 handle;
-       u16 bpf_num_ops;
+       union {
+               u32 bpf_fd;
+               u16 bpf_num_ops;
+       };
+       struct sock_filter *bpf_ops;
+       const char *bpf_name;
        struct tcf_proto *tp;
        struct rcu_head rcu;
 };
 
 static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
+       [TCA_BPF_FD]            = { .type = NLA_U32 },
+       [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
@@ -54,8 +64,10 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 {
        struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
        struct cls_bpf_prog *prog;
-       int ret;
+       int ret = -1;
 
+       /* Needed here for accessing maps. */
+       rcu_read_lock();
        list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res = BPF_PROG_RUN(prog->filter, skb);
 
@@ -70,10 +82,16 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                if (ret < 0)
                        continue;
 
-               return ret;
+               break;
        }
+       rcu_read_unlock();
 
-       return -1;
+       return ret;
+}
+
+static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
+{
+       return !prog->bpf_ops;
 }
 
 static int cls_bpf_init(struct tcf_proto *tp)
@@ -94,8 +112,12 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
 {
        tcf_exts_destroy(&prog->exts);
 
-       bpf_prog_destroy(prog->filter);
+       if (cls_bpf_is_ebpf(prog))
+               bpf_prog_put(prog->filter);
+       else
+               bpf_prog_destroy(prog->filter);
 
+       kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
        kfree(prog);
 }
@@ -114,14 +136,18 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
        list_del_rcu(&prog->link);
        tcf_unbind_filter(tp, &prog->res);
        call_rcu(&prog->rcu, __cls_bpf_delete_prog);
+
        return 0;
 }
 
-static void cls_bpf_destroy(struct tcf_proto *tp)
+static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
 {
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;
 
+       if (!force && !list_empty(&head->plist))
+               return false;
+
        list_for_each_entry_safe(prog, tmp, &head->plist, link) {
                list_del_rcu(&prog->link);
                tcf_unbind_filter(tp, &prog->res);
@@ -130,6 +156,7 @@ static void cls_bpf_destroy(struct tcf_proto *tp)
 
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
+       return true;
 }
 
 static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
@@ -151,69 +178,121 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
        return ret;
 }
 
-static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
-                                  struct cls_bpf_prog *prog,
-                                  unsigned long base, struct nlattr **tb,
-                                  struct nlattr *est, bool ovr)
+static int cls_bpf_prog_from_ops(struct nlattr **tb,
+                                struct cls_bpf_prog *prog, u32 classid)
 {
        struct sock_filter *bpf_ops;
-       struct tcf_exts exts;
-       struct sock_fprog_kern tmp;
+       struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_num_ops;
-       u32 classid;
        int ret;
 
-       if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
-               return -EINVAL;
-
-       tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
-       ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
-       if (ret < 0)
-               return ret;
-
-       classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
        bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
-       if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0) {
-               ret = -EINVAL;
-               goto errout;
-       }
+       if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
+               return -EINVAL;
 
        bpf_size = bpf_num_ops * sizeof(*bpf_ops);
-       if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
-               ret = -EINVAL;
-               goto errout;
-       }
+       if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
+               return -EINVAL;
 
        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
-       if (bpf_ops == NULL) {
-               ret = -ENOMEM;
-               goto errout;
-       }
+       if (bpf_ops == NULL)
+               return -ENOMEM;
 
        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
 
-       tmp.len = bpf_num_ops;
-       tmp.filter = bpf_ops;
+       fprog_tmp.len = bpf_num_ops;
+       fprog_tmp.filter = bpf_ops;
 
-       ret = bpf_prog_create(&fp, &tmp);
-       if (ret)
-               goto errout_free;
+       ret = bpf_prog_create(&fp, &fprog_tmp);
+       if (ret < 0) {
+               kfree(bpf_ops);
+               return ret;
+       }
 
-       prog->bpf_num_ops = bpf_num_ops;
        prog->bpf_ops = bpf_ops;
+       prog->bpf_num_ops = bpf_num_ops;
+       prog->bpf_name = NULL;
+
+       prog->filter = fp;
+       prog->res.classid = classid;
+
+       return 0;
+}
+
+static int cls_bpf_prog_from_efd(struct nlattr **tb,
+                                struct cls_bpf_prog *prog, u32 classid)
+{
+       struct bpf_prog *fp;
+       char *name = NULL;
+       u32 bpf_fd;
+
+       bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
+
+       fp = bpf_prog_get(bpf_fd);
+       if (IS_ERR(fp))
+               return PTR_ERR(fp);
+
+       if (fp->type != BPF_PROG_TYPE_SCHED_CLS) {
+               bpf_prog_put(fp);
+               return -EINVAL;
+       }
+
+       if (tb[TCA_BPF_NAME]) {
+               name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
+                              nla_len(tb[TCA_BPF_NAME]),
+                              GFP_KERNEL);
+               if (!name) {
+                       bpf_prog_put(fp);
+                       return -ENOMEM;
+               }
+       }
+
+       prog->bpf_ops = NULL;
+       prog->bpf_fd = bpf_fd;
+       prog->bpf_name = name;
+
        prog->filter = fp;
        prog->res.classid = classid;
 
+       return 0;
+}
+
+static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
+                                  struct cls_bpf_prog *prog,
+                                  unsigned long base, struct nlattr **tb,
+                                  struct nlattr *est, bool ovr)
+{
+       struct tcf_exts exts;
+       bool is_bpf, is_ebpf;
+       u32 classid;
+       int ret;
+
+       is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
+       is_ebpf = tb[TCA_BPF_FD];
+
+       if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
+           !tb[TCA_BPF_CLASSID])
+               return -EINVAL;
+
+       tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
+       ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
+       if (ret < 0)
+               return ret;
+
+       classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
+
+       ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog, classid) :
+                      cls_bpf_prog_from_efd(tb, prog, classid);
+       if (ret < 0) {
+               tcf_exts_destroy(&exts);
+               return ret;
+       }
+
        tcf_bind_filter(tp, &prog->res, base);
        tcf_exts_change(tp, &prog->exts, &exts);
 
        return 0;
-errout_free:
-       kfree(bpf_ops);
-errout:
-       tcf_exts_destroy(&exts);
-       return ret;
 }
 
 static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
@@ -297,11 +376,43 @@ errout:
        return ret;
 }
 
+static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
+                                struct sk_buff *skb)
+{
+       struct nlattr *nla;
+
+       if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
+               return -EMSGSIZE;
+
+       nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
+                         sizeof(struct sock_filter));
+       if (nla == NULL)
+               return -EMSGSIZE;
+
+       memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
+
+       return 0;
+}
+
+static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
+                                 struct sk_buff *skb)
+{
+       if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
+               return -EMSGSIZE;
+
+       if (prog->bpf_name &&
+           nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                        struct sk_buff *skb, struct tcmsg *tm)
 {
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
-       struct nlattr *nest, *nla;
+       struct nlattr *nest;
+       int ret;
 
        if (prog == NULL)
                return skb->len;
@@ -314,16 +425,14 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
 
        if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;
-       if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
-               goto nla_put_failure;
 
-       nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
-                         sizeof(struct sock_filter));
-       if (nla == NULL)
+       if (cls_bpf_is_ebpf(prog))
+               ret = cls_bpf_dump_ebpf_info(prog, skb);
+       else
+               ret = cls_bpf_dump_bpf_info(prog, skb);
+       if (ret)
                goto nla_put_failure;
 
-       memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
-
        if (tcf_exts_dump(skb, &prog->exts) < 0)
                goto nla_put_failure;
 
index 221697ab0247c5e786c70e181a43c1145a30748b..ea611b21641241737223f34334c0189df00d11e7 100644 (file)
@@ -143,14 +143,18 @@ errout:
        return err;
 }
 
-static void cls_cgroup_destroy(struct tcf_proto *tp)
+static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force)
 {
        struct cls_cgroup_head *head = rtnl_dereference(tp->root);
 
+       if (!force)
+               return false;
+
        if (head) {
                RCU_INIT_POINTER(tp->root, NULL);
                call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
        }
+       return true;
 }
 
 static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
index 461410394d085917ee8b8039ab5cfd5f5a6cf7c3..a620c4e288a51f55771399f6c1f81328bab9f7c7 100644 (file)
@@ -557,17 +557,21 @@ static int flow_init(struct tcf_proto *tp)
        return 0;
 }
 
-static void flow_destroy(struct tcf_proto *tp)
+static bool flow_destroy(struct tcf_proto *tp, bool force)
 {
        struct flow_head *head = rtnl_dereference(tp->root);
        struct flow_filter *f, *next;
 
+       if (!force && !list_empty(&head->filters))
+               return false;
+
        list_for_each_entry_safe(f, next, &head->filters, list) {
                list_del_rcu(&f->list);
                call_rcu(&f->rcu, flow_destroy_filter);
        }
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
+       return true;
 }
 
 static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
index a5269f76004c2974a2e1e650433a7ba394710965..715e01e5910a94a9af40534ec5c0e820b96adc99 100644 (file)
@@ -33,6 +33,7 @@
 
 struct fw_head {
        u32                     mask;
+       bool                    mask_set;
        struct fw_filter __rcu  *ht[HTSIZE];
        struct rcu_head         rcu;
 };
@@ -113,6 +114,14 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
 
 static int fw_init(struct tcf_proto *tp)
 {
+       struct fw_head *head;
+
+       head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
+       if (head == NULL)
+               return -ENOBUFS;
+
+       head->mask_set = false;
+       rcu_assign_pointer(tp->root, head);
        return 0;
 }
 
@@ -124,14 +133,20 @@ static void fw_delete_filter(struct rcu_head *head)
        kfree(f);
 }
 
-static void fw_destroy(struct tcf_proto *tp)
+static bool fw_destroy(struct tcf_proto *tp, bool force)
 {
        struct fw_head *head = rtnl_dereference(tp->root);
        struct fw_filter *f;
        int h;
 
        if (head == NULL)
-               return;
+               return true;
+
+       if (!force) {
+               for (h = 0; h < HTSIZE; h++)
+                       if (rcu_access_pointer(head->ht[h]))
+                               return false;
+       }
 
        for (h = 0; h < HTSIZE; h++) {
                while ((f = rtnl_dereference(head->ht[h])) != NULL) {
@@ -143,6 +158,7 @@ static void fw_destroy(struct tcf_proto *tp)
        }
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
+       return true;
 }
 
 static int fw_delete(struct tcf_proto *tp, unsigned long arg)
@@ -286,17 +302,11 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
        if (!handle)
                return -EINVAL;
 
-       if (head == NULL) {
-               u32 mask = 0xFFFFFFFF;
+       if (!head->mask_set) {
+               head->mask = 0xFFFFFFFF;
                if (tb[TCA_FW_MASK])
-                       mask = nla_get_u32(tb[TCA_FW_MASK]);
-
-               head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
-               if (head == NULL)
-                       return -ENOBUFS;
-               head->mask = mask;
-
-               rcu_assign_pointer(tp->root, head);
+                       head->mask = nla_get_u32(tb[TCA_FW_MASK]);
+               head->mask_set = true;
        }
 
        f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
index 2ecd24688554e76e38d98b7338072a37d4c0cea1..08a3b0a6f5abd3fd674d7bca32c62c2626608b15 100644 (file)
@@ -258,6 +258,13 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
 
 static int route4_init(struct tcf_proto *tp)
 {
+       struct route4_head *head;
+
+       head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
+       if (head == NULL)
+               return -ENOBUFS;
+
+       rcu_assign_pointer(tp->root, head);
        return 0;
 }
 
@@ -270,13 +277,20 @@ route4_delete_filter(struct rcu_head *head)
        kfree(f);
 }
 
-static void route4_destroy(struct tcf_proto *tp)
+static bool route4_destroy(struct tcf_proto *tp, bool force)
 {
        struct route4_head *head = rtnl_dereference(tp->root);
        int h1, h2;
 
        if (head == NULL)
-               return;
+               return true;
+
+       if (!force) {
+               for (h1 = 0; h1 <= 256; h1++) {
+                       if (rcu_access_pointer(head->table[h1]))
+                               return false;
+               }
+       }
 
        for (h1 = 0; h1 <= 256; h1++) {
                struct route4_bucket *b;
@@ -301,6 +315,7 @@ static void route4_destroy(struct tcf_proto *tp)
        }
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
+       return true;
 }
 
 static int route4_delete(struct tcf_proto *tp, unsigned long arg)
@@ -484,13 +499,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
                        return -EINVAL;
 
        err = -ENOBUFS;
-       if (head == NULL) {
-               head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
-               if (head == NULL)
-                       goto errout;
-               rcu_assign_pointer(tp->root, head);
-       }
-
        f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
        if (!f)
                goto errout;
index edd8ade3fbc1f4358b4275940e6f62b3d814b3dd..02fa82792dab8334d1dc14408f7ed42a4db0c141 100644 (file)
@@ -291,13 +291,20 @@ rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
        kfree_rcu(f, rcu);
 }
 
-static void rsvp_destroy(struct tcf_proto *tp)
+static bool rsvp_destroy(struct tcf_proto *tp, bool force)
 {
        struct rsvp_head *data = rtnl_dereference(tp->root);
        int h1, h2;
 
        if (data == NULL)
-               return;
+               return true;
+
+       if (!force) {
+               for (h1 = 0; h1 < 256; h1++) {
+                       if (rcu_access_pointer(data->ht[h1]))
+                               return false;
+               }
+       }
 
        RCU_INIT_POINTER(tp->root, NULL);
 
@@ -319,6 +326,7 @@ static void rsvp_destroy(struct tcf_proto *tp)
                }
        }
        kfree_rcu(data, rcu);
+       return true;
 }
 
 static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
index bd49bf547a479f139b25e0507b090d51c137c519..a557dbaf5afedaa7a3a3a18c53a83238f6d32420 100644 (file)
@@ -468,11 +468,14 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
        }
 }
 
-static void tcindex_destroy(struct tcf_proto *tp)
+static bool tcindex_destroy(struct tcf_proto *tp, bool force)
 {
        struct tcindex_data *p = rtnl_dereference(tp->root);
        struct tcf_walker walker;
 
+       if (!force)
+               return false;
+
        pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
        walker.count = 0;
        walker.skip = 0;
@@ -481,6 +484,7 @@ static void tcindex_destroy(struct tcf_proto *tp)
 
        RCU_INIT_POINTER(tp->root, NULL);
        call_rcu(&p->rcu, __tcindex_destroy);
+       return true;
 }
 
 
index 95fdf4e4005190704dcd405e6b9422f8f56ba796..cab9e9b43967a57780140b9229c8cadff088607c 100644 (file)
@@ -463,13 +463,35 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
        return -ENOENT;
 }
 
-static void u32_destroy(struct tcf_proto *tp)
+static bool ht_empty(struct tc_u_hnode *ht)
+{
+       unsigned int h;
+
+       for (h = 0; h <= ht->divisor; h++)
+               if (rcu_access_pointer(ht->ht[h]))
+                       return false;
+
+       return true;
+}
+
+static bool u32_destroy(struct tcf_proto *tp, bool force)
 {
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
 
        WARN_ON(root_ht == NULL);
 
+       if (!force) {
+               if (root_ht) {
+                       if (root_ht->refcnt > 1)
+                               return false;
+                       if (root_ht->refcnt == 1) {
+                               if (!ht_empty(root_ht))
+                                       return false;
+                       }
+               }
+       }
+
        if (root_ht && --root_ht->refcnt == 0)
                u32_destroy_hnode(tp, root_ht);
 
@@ -494,6 +516,7 @@ static void u32_destroy(struct tcf_proto *tp)
        }
 
        tp->data = NULL;
+       return true;
 }
 
 static int u32_delete(struct tcf_proto *tp, unsigned long arg)
index f03c3de16c274f3152f4ef701339d20158609b51..73e2ed576ceb35e9e9f66eca57d25e8c24b936f4 100644 (file)
@@ -34,7 +34,6 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
 {
        struct text_match *tm = EM_TEXT_PRIV(m);
        int from, to;
-       struct ts_state state;
 
        from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data;
        from += tm->from_offset;
@@ -42,7 +41,7 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
        to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
        to += tm->to_offset;
 
-       return skb_find_text(skb, from, to, tm->config, &state) != UINT_MAX;
+       return skb_find_text(skb, from, to, tm->config) != UINT_MAX;
 }
 
 static int em_text_change(struct net *net, void *data, int len,
index 243b7d169d6183f662ab7f30d0e93492b29e79e3..ad9eed70bc8f8e16c3118c6527374a952823e2c0 100644 (file)
@@ -1858,11 +1858,15 @@ reclassify:
 }
 EXPORT_SYMBOL(tc_classify);
 
-void tcf_destroy(struct tcf_proto *tp)
+bool tcf_destroy(struct tcf_proto *tp, bool force)
 {
-       tp->ops->destroy(tp);
-       module_put(tp->ops->owner);
-       kfree_rcu(tp, rcu);
+       if (tp->ops->destroy(tp, force)) {
+               module_put(tp->ops->owner);
+               kfree_rcu(tp, rcu);
+               return true;
+       }
+
+       return false;
 }
 
 void tcf_destroy_chain(struct tcf_proto __rcu **fl)
@@ -1871,7 +1875,7 @@ void tcf_destroy_chain(struct tcf_proto __rcu **fl)
 
        while ((tp = rtnl_dereference(*fl)) != NULL) {
                RCU_INIT_POINTER(*fl, tp->next);
-               tcf_destroy(tp);
+               tcf_destroy(tp, true);
        }
 }
 EXPORT_SYMBOL(tcf_destroy_chain);
index dfcea20e31711288aea660add30248b442769979..f377702d4b9185762293a7251f2574a9e72515eb 100644 (file)
@@ -8,7 +8,7 @@
  *     as published by the Free Software Foundation; either version
  *     2 of the License, or (at your option) any later version.
  *
- *  Meant to be mostly used for localy generated traffic :
+ *  Meant to be mostly used for locally generated traffic :
  *  Fast classification depends on skb->sk being set before reaching us.
  *  If not, (router workload), we use rxhash as fallback, with 32 bits wide hash.
  *  All packets belonging to a socket are considered as a 'flow'.
@@ -63,7 +63,7 @@ struct fq_flow {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
-       struct rb_node  fq_node;        /* anchor in fq_root[] trees */
+       struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
index 8f34b27d5775f053ffde8a763f724c0d8b4f6e1f..53b7acde9aa37bf3d4029c459421564d5270f4c0 100644 (file)
@@ -1322,8 +1322,7 @@ static __init int sctp_init(void)
        int max_share;
        int order;
 
-       BUILD_BUG_ON(sizeof(struct sctp_ulpevent) >
-                    sizeof(((struct sk_buff *) 0)->cb));
+       sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
 
        /* Allocate bind_bucket and chunk caches. */
        status = -ENOBUFS;
index aafe94bf292e73ecb765a31ae3456c3c11fe932f..f09de7fac2e6acddad8b2e046dbf626e329cb674 100644 (file)
@@ -102,11 +102,6 @@ static int sctp_autobind(struct sock *sk);
 static void sctp_sock_migrate(struct sock *, struct sock *,
                              struct sctp_association *, sctp_socket_type_t);
 
-extern struct kmem_cache *sctp_bucket_cachep;
-extern long sysctl_sctp_mem[3];
-extern int sysctl_sctp_rmem[3];
-extern int sysctl_sctp_wmem[3];
-
 static int sctp_memory_pressure;
 static atomic_long_t sctp_memory_allocated;
 struct percpu_counter sctp_sockets_allocated;
@@ -1586,8 +1581,7 @@ static int sctp_error(struct sock *sk, int flags, int err)
 
 static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
 
-static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
-                       struct msghdr *msg, size_t msg_len)
+static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 {
        struct net *net = sock_net(sk);
        struct sctp_sock *sp;
@@ -2066,9 +2060,8 @@ static int sctp_skb_pull(struct sk_buff *skb, int len)
  *  flags   - flags sent or received with the user message, see Section
  *            5 for complete description of the flags.
  */
-static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
-                       struct msghdr *msg, size_t len, int noblock,
-                       int flags, int *addr_len)
+static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+                       int noblock, int flags, int *addr_len)
 {
        struct sctp_ulpevent *event = NULL;
        struct sctp_sock *sp = sctp_sk(sk);
index 2e9ada10fd846c10bc6281c30f29cf1fed02ca3c..26d50c565f54223ec28d29358932227b4b22acd1 100644 (file)
@@ -58,10 +58,6 @@ static unsigned long max_autoclose_max =
        (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
        ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
 
-extern long sysctl_sctp_mem[3];
-extern int sysctl_sctp_rmem[3];
-extern int sysctl_sctp_wmem[3];
-
 static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
                                void __user *buffer, size_t *lenp,
                                loff_t *ppos);
index 245330ca0015c2fd2548ead861d379714151c901..3e33959f3ce5711863fca7ce1805a4d1cf4ed6e0 100644 (file)
@@ -140,8 +140,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 static const struct file_operations socket_file_ops = {
        .owner =        THIS_MODULE,
        .llseek =       no_llseek,
-       .read =         new_sync_read,
-       .write =        new_sync_write,
        .read_iter =    sock_read_iter,
        .write_iter =   sock_write_iter,
        .poll =         sock_poll,
@@ -610,60 +608,27 @@ void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
 }
 EXPORT_SYMBOL(__sock_tx_timestamp);
 
-static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
-                                      struct msghdr *msg, size_t size)
+static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
 {
-       return sock->ops->sendmsg(iocb, sock, msg, size);
-}
-
-static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-                                struct msghdr *msg, size_t size)
-{
-       int err = security_socket_sendmsg(sock, msg, size);
-
-       return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size);
-}
-
-static int do_sock_sendmsg(struct socket *sock, struct msghdr *msg,
-                          size_t size, bool nosec)
-{
-       struct kiocb iocb;
-       int ret;
-
-       init_sync_kiocb(&iocb, NULL);
-       ret = nosec ? __sock_sendmsg_nosec(&iocb, sock, msg, size) :
-                     __sock_sendmsg(&iocb, sock, msg, size);
-       if (-EIOCBQUEUED == ret)
-               ret = wait_on_sync_kiocb(&iocb);
+       int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg));
+       BUG_ON(ret == -EIOCBQUEUED);
        return ret;
 }
 
-int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+int sock_sendmsg(struct socket *sock, struct msghdr *msg)
 {
-       return do_sock_sendmsg(sock, msg, size, false);
-}
-EXPORT_SYMBOL(sock_sendmsg);
+       int err = security_socket_sendmsg(sock, msg,
+                                         msg_data_left(msg));
 
-static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size)
-{
-       return do_sock_sendmsg(sock, msg, size, true);
+       return err ?: sock_sendmsg_nosec(sock, msg);
 }
+EXPORT_SYMBOL(sock_sendmsg);
 
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
                   struct kvec *vec, size_t num, size_t size)
 {
-       mm_segment_t oldfs = get_fs();
-       int result;
-
-       set_fs(KERNEL_DS);
-       /*
-        * the following is safe, since for compiler definitions of kvec and
-        * iovec are identical, yielding the same in-core layout and alignment
-        */
-       iov_iter_init(&msg->msg_iter, WRITE, (struct iovec *)vec, num, size);
-       result = sock_sendmsg(sock, msg, size);
-       set_fs(oldfs);
-       return result;
+       iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size);
+       return sock_sendmsg(sock, msg);
 }
 EXPORT_SYMBOL(kernel_sendmsg);
 
@@ -731,9 +696,9 @@ EXPORT_SYMBOL_GPL(__sock_recv_wifi_status);
 static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
                                   struct sk_buff *skb)
 {
-       if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount)
+       if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && SOCK_SKB_CB(skb)->dropcount)
                put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL,
-                       sizeof(__u32), &skb->dropcount);
+                       sizeof(__u32), &SOCK_SKB_CB(skb)->dropcount);
 }
 
 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
@@ -744,47 +709,21 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
 
-static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
-                                      struct msghdr *msg, size_t size, int flags)
+static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
+                                    size_t size, int flags)
 {
-       return sock->ops->recvmsg(iocb, sock, msg, size, flags);
+       return sock->ops->recvmsg(sock, msg, size, flags);
 }
 
-static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-                                struct msghdr *msg, size_t size, int flags)
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                int flags)
 {
        int err = security_socket_recvmsg(sock, msg, size, flags);
 
-       return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags);
-}
-
-int sock_recvmsg(struct socket *sock, struct msghdr *msg,
-                size_t size, int flags)
-{
-       struct kiocb iocb;
-       int ret;
-
-       init_sync_kiocb(&iocb, NULL);
-       ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
-       if (-EIOCBQUEUED == ret)
-               ret = wait_on_sync_kiocb(&iocb);
-       return ret;
+       return err ?: sock_recvmsg_nosec(sock, msg, size, flags);
 }
 EXPORT_SYMBOL(sock_recvmsg);
 
-static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
-                             size_t size, int flags)
-{
-       struct kiocb iocb;
-       int ret;
-
-       init_sync_kiocb(&iocb, NULL);
-       ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags);
-       if (-EIOCBQUEUED == ret)
-               ret = wait_on_sync_kiocb(&iocb);
-       return ret;
-}
-
 /**
  * kernel_recvmsg - Receive a message from a socket (kernel space)
  * @sock:       The socket to receive the message from
@@ -806,12 +745,8 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
        mm_segment_t oldfs = get_fs();
        int result;
 
+       iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size);
        set_fs(KERNEL_DS);
-       /*
-        * the following is safe, since for compiler definitions of kvec and
-        * iovec are identical, yielding the same in-core layout and alignment
-        */
-       iov_iter_init(&msg->msg_iter, READ, (struct iovec *)vec, num, size);
        result = sock_recvmsg(sock, msg, size, flags);
        set_fs(oldfs);
        return result;
@@ -849,7 +784,8 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
        struct file *file = iocb->ki_filp;
        struct socket *sock = file->private_data;
-       struct msghdr msg = {.msg_iter = *to};
+       struct msghdr msg = {.msg_iter = *to,
+                            .msg_iocb = iocb};
        ssize_t res;
 
        if (file->f_flags & O_NONBLOCK)
@@ -858,11 +794,10 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
        if (iocb->ki_pos != 0)
                return -ESPIPE;
 
-       if (iocb->ki_nbytes == 0)       /* Match SYS5 behaviour */
+       if (!iov_iter_count(to))        /* Match SYS5 behaviour */
                return 0;
 
-       res = __sock_recvmsg(iocb, sock, &msg,
-                            iocb->ki_nbytes, msg.msg_flags);
+       res = sock_recvmsg(sock, &msg, iov_iter_count(to), msg.msg_flags);
        *to = msg.msg_iter;
        return res;
 }
@@ -871,7 +806,8 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct socket *sock = file->private_data;
-       struct msghdr msg = {.msg_iter = *from};
+       struct msghdr msg = {.msg_iter = *from,
+                            .msg_iocb = iocb};
        ssize_t res;
 
        if (iocb->ki_pos != 0)
@@ -883,7 +819,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (sock->type == SOCK_SEQPACKET)
                msg.msg_flags |= MSG_EOR;
 
-       res = __sock_sendmsg(iocb, sock, &msg, iocb->ki_nbytes);
+       res = sock_sendmsg(sock, &msg);
        *from = msg.msg_iter;
        return res;
 }
@@ -1700,18 +1636,14 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
        struct iovec iov;
        int fput_needed;
 
-       if (len > INT_MAX)
-               len = INT_MAX;
-       if (unlikely(!access_ok(VERIFY_READ, buff, len)))
-               return -EFAULT;
+       err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter);
+       if (unlikely(err))
+               return err;
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
 
-       iov.iov_base = buff;
-       iov.iov_len = len;
        msg.msg_name = NULL;
-       iov_iter_init(&msg.msg_iter, WRITE, &iov, 1, len);
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_namelen = 0;
@@ -1725,7 +1657,7 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
        if (sock->file->f_flags & O_NONBLOCK)
                flags |= MSG_DONTWAIT;
        msg.msg_flags = flags;
-       err = sock_sendmsg(sock, &msg, len);
+       err = sock_sendmsg(sock, &msg);
 
 out_put:
        fput_light(sock->file, fput_needed);
@@ -1760,26 +1692,22 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
        int err, err2;
        int fput_needed;
 
-       if (size > INT_MAX)
-               size = INT_MAX;
-       if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
-               return -EFAULT;
+       err = import_single_range(READ, ubuf, size, &iov, &msg.msg_iter);
+       if (unlikely(err))
+               return err;
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
 
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
-       iov.iov_len = size;
-       iov.iov_base = ubuf;
-       iov_iter_init(&msg.msg_iter, READ, &iov, 1, size);
        /* Save some cycles and don't copy the address if not needed */
        msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
        /* We assume all kernel code knows the size of sockaddr_storage */
        msg.msg_namelen = 0;
        if (sock->file->f_flags & O_NONBLOCK)
                flags |= MSG_DONTWAIT;
-       err = sock_recvmsg(sock, &msg, size, flags);
+       err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags);
 
        if (err >= 0 && addr != NULL) {
                err2 = move_addr_to_user(&address,
@@ -1899,10 +1827,10 @@ struct used_address {
        unsigned int name_len;
 };
 
-static ssize_t copy_msghdr_from_user(struct msghdr *kmsg,
-                                    struct user_msghdr __user *umsg,
-                                    struct sockaddr __user **save_addr,
-                                    struct iovec **iov)
+static int copy_msghdr_from_user(struct msghdr *kmsg,
+                                struct user_msghdr __user *umsg,
+                                struct sockaddr __user **save_addr,
+                                struct iovec **iov)
 {
        struct sockaddr __user *uaddr;
        struct iovec __user *uiov;
@@ -1946,13 +1874,10 @@ static ssize_t copy_msghdr_from_user(struct msghdr *kmsg,
        if (nr_segs > UIO_MAXIOV)
                return -EMSGSIZE;
 
-       err = rw_copy_check_uvector(save_addr ? READ : WRITE,
-                                   uiov, nr_segs,
-                                   UIO_FASTIOV, *iov, iov);
-       if (err >= 0)
-               iov_iter_init(&kmsg->msg_iter, save_addr ? READ : WRITE,
-                             *iov, nr_segs, err);
-       return err;
+       kmsg->msg_iocb = NULL;
+
+       return import_iovec(save_addr ? READ : WRITE, uiov, nr_segs,
+                           UIO_FASTIOV, iov, &kmsg->msg_iter);
 }
 
 static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
@@ -1967,7 +1892,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
            __attribute__ ((aligned(sizeof(__kernel_size_t))));
        /* 20 is size of ipv6_pktinfo */
        unsigned char *ctl_buf = ctl;
-       int ctl_len, total_len;
+       int ctl_len;
        ssize_t err;
 
        msg_sys->msg_name = &address;
@@ -1977,8 +1902,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
        else
                err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov);
        if (err < 0)
-               goto out_freeiov;
-       total_len = err;
+               return err;
 
        err = -ENOBUFS;
 
@@ -2025,10 +1949,10 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
            used_address->name_len == msg_sys->msg_namelen &&
            !memcmp(&used_address->name, msg_sys->msg_name,
                    used_address->name_len)) {
-               err = sock_sendmsg_nosec(sock, msg_sys, total_len);
+               err = sock_sendmsg_nosec(sock, msg_sys);
                goto out_freectl;
        }
-       err = sock_sendmsg(sock, msg_sys, total_len);
+       err = sock_sendmsg(sock, msg_sys);
        /*
         * If this is sendmmsg() and sending to current destination address was
         * successful, remember it.
@@ -2044,8 +1968,7 @@ out_freectl:
        if (ctl_buf != ctl)
                sock_kfree_s(sock->sk, ctl_buf, ctl_len);
 out_freeiov:
-       if (iov != iovstack)
-               kfree(iov);
+       kfree(iov);
        return err;
 }
 
@@ -2170,8 +2093,8 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
        else
                err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov);
        if (err < 0)
-               goto out_freeiov;
-       total_len = err;
+               return err;
+       total_len = iov_iter_count(&msg_sys->msg_iter);
 
        cmsg_ptr = (unsigned long)msg_sys->msg_control;
        msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
@@ -2209,8 +2132,7 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
        err = len;
 
 out_freeiov:
-       if (iov != iovstack)
-               kfree(iov);
+       kfree(iov);
        return err;
 }
 
index cc331b6cf573d95340fddf0ea0e3ae8dd8ae8f3d..0c8120229a0353967138d20c5fe009a1c6e81d1f 100644 (file)
@@ -257,7 +257,7 @@ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
 
                svc_set_cmsg_data(rqstp, cmh);
 
-               if (sock_sendmsg(sock, &msg, 0) < 0)
+               if (sock_sendmsg(sock, &msg) < 0)
                        goto out;
        }
 
index 8c1e558db11893b7f70eaa5e5201da07a3df8812..46568b85c3339f57a0d6835e82eff4b67c3ba326 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * net/switchdev/switchdev.c - Switch device API
  * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -14,6 +15,7 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/netdevice.h>
+#include <net/ip_fib.h>
 #include <net/switchdev.h>
 
 /**
 int netdev_switch_parent_id_get(struct net_device *dev,
                                struct netdev_phys_item_id *psid)
 {
-       const struct net_device_ops *ops = dev->netdev_ops;
+       const struct swdev_ops *ops = dev->swdev_ops;
 
-       if (!ops->ndo_switch_parent_id_get)
+       if (!ops || !ops->swdev_parent_id_get)
                return -EOPNOTSUPP;
-       return ops->ndo_switch_parent_id_get(dev, psid);
+       return ops->swdev_parent_id_get(dev, psid);
 }
-EXPORT_SYMBOL(netdev_switch_parent_id_get);
+EXPORT_SYMBOL_GPL(netdev_switch_parent_id_get);
 
 /**
  *     netdev_switch_port_stp_update - Notify switch device port of STP
@@ -44,20 +46,29 @@ EXPORT_SYMBOL(netdev_switch_parent_id_get);
  */
 int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
 {
-       const struct net_device_ops *ops = dev->netdev_ops;
+       const struct swdev_ops *ops = dev->swdev_ops;
+       struct net_device *lower_dev;
+       struct list_head *iter;
+       int err = -EOPNOTSUPP;
 
-       if (!ops->ndo_switch_port_stp_update)
-               return -EOPNOTSUPP;
-       WARN_ON(!ops->ndo_switch_parent_id_get);
-       return ops->ndo_switch_port_stp_update(dev, state);
+       if (ops && ops->swdev_port_stp_update)
+               return ops->swdev_port_stp_update(dev, state);
+
+       netdev_for_each_lower_dev(dev, lower_dev, iter) {
+               err = netdev_switch_port_stp_update(lower_dev, state);
+               if (err && err != -EOPNOTSUPP)
+                       return err;
+       }
+
+       return err;
 }
-EXPORT_SYMBOL(netdev_switch_port_stp_update);
+EXPORT_SYMBOL_GPL(netdev_switch_port_stp_update);
 
 static DEFINE_MUTEX(netdev_switch_mutex);
 static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain);
 
 /**
- *     register_netdev_switch_notifier - Register nofifier
+ *     register_netdev_switch_notifier - Register notifier
  *     @nb: notifier_block
  *
  *     Register switch device notifier. This should be used by code
@@ -73,10 +84,10 @@ int register_netdev_switch_notifier(struct notifier_block *nb)
        mutex_unlock(&netdev_switch_mutex);
        return err;
 }
-EXPORT_SYMBOL(register_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(register_netdev_switch_notifier);
 
 /**
- *     unregister_netdev_switch_notifier - Unregister nofifier
+ *     unregister_netdev_switch_notifier - Unregister notifier
  *     @nb: notifier_block
  *
  *     Unregister switch device notifier.
@@ -91,10 +102,10 @@ int unregister_netdev_switch_notifier(struct notifier_block *nb)
        mutex_unlock(&netdev_switch_mutex);
        return err;
 }
-EXPORT_SYMBOL(unregister_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
 
 /**
- *     call_netdev_switch_notifiers - Call nofifiers
+ *     call_netdev_switch_notifiers - Call notifiers
  *     @val: value passed unmodified to notifier function
  *     @dev: port device
  *     @info: notifier information data
@@ -114,7 +125,7 @@ int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
        mutex_unlock(&netdev_switch_mutex);
        return err;
 }
-EXPORT_SYMBOL(call_netdev_switch_notifiers);
+EXPORT_SYMBOL_GPL(call_netdev_switch_notifiers);
 
 /**
  *     netdev_switch_port_bridge_setlink - Notify switch device port of bridge
@@ -139,7 +150,7 @@ int netdev_switch_port_bridge_setlink(struct net_device *dev,
 
        return ops->ndo_bridge_setlink(dev, nlh, flags);
 }
-EXPORT_SYMBOL(netdev_switch_port_bridge_setlink);
+EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_setlink);
 
 /**
  *     netdev_switch_port_bridge_dellink - Notify switch device port of bridge
@@ -164,7 +175,7 @@ int netdev_switch_port_bridge_dellink(struct net_device *dev,
 
        return ops->ndo_bridge_dellink(dev, nlh, flags);
 }
-EXPORT_SYMBOL(netdev_switch_port_bridge_dellink);
+EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_dellink);
 
 /**
  *     ndo_dflt_netdev_switch_port_bridge_setlink - default ndo bridge setlink
@@ -194,7 +205,7 @@ int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
 
        return ret;
 }
-EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_setlink);
+EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_setlink);
 
 /**
  *     ndo_dflt_netdev_switch_port_bridge_dellink - default ndo bridge dellink
@@ -224,4 +235,170 @@ int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
 
        return ret;
 }
-EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_dellink);
+EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_dellink);
+
+static struct net_device *netdev_switch_get_lowest_dev(struct net_device *dev)
+{
+       const struct swdev_ops *ops = dev->swdev_ops;
+       struct net_device *lower_dev;
+       struct net_device *port_dev;
+       struct list_head *iter;
+
+       /* Recusively search down until we find a sw port dev.
+        * (A sw port dev supports swdev_parent_id_get).
+        */
+
+       if (dev->features & NETIF_F_HW_SWITCH_OFFLOAD &&
+           ops && ops->swdev_parent_id_get)
+               return dev;
+
+       netdev_for_each_lower_dev(dev, lower_dev, iter) {
+               port_dev = netdev_switch_get_lowest_dev(lower_dev);
+               if (port_dev)
+                       return port_dev;
+       }
+
+       return NULL;
+}
+
+static struct net_device *netdev_switch_get_dev_by_nhs(struct fib_info *fi)
+{
+       struct netdev_phys_item_id psid;
+       struct netdev_phys_item_id prev_psid;
+       struct net_device *dev = NULL;
+       int nhsel;
+
+       /* For this route, all nexthop devs must be on the same switch. */
+
+       for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
+               const struct fib_nh *nh = &fi->fib_nh[nhsel];
+
+               if (!nh->nh_dev)
+                       return NULL;
+
+               dev = netdev_switch_get_lowest_dev(nh->nh_dev);
+               if (!dev)
+                       return NULL;
+
+               if (netdev_switch_parent_id_get(dev, &psid))
+                       return NULL;
+
+               if (nhsel > 0) {
+                       if (prev_psid.id_len != psid.id_len)
+                               return NULL;
+                       if (memcmp(prev_psid.id, psid.id, psid.id_len))
+                               return NULL;
+               }
+
+               prev_psid = psid;
+       }
+
+       return dev;
+}
+
+/**
+ *     netdev_switch_fib_ipv4_add - Add IPv4 route entry to switch
+ *
+ *     @dst: route's IPv4 destination address
+ *     @dst_len: destination address length (prefix length)
+ *     @fi: route FIB info structure
+ *     @tos: route TOS
+ *     @type: route type
+ *     @nlflags: netlink flags passed in (NLM_F_*)
+ *     @tb_id: route table ID
+ *
+ *     Add IPv4 route entry to switch device.
+ */
+int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+                              u8 tos, u8 type, u32 nlflags, u32 tb_id)
+{
+       struct net_device *dev;
+       const struct swdev_ops *ops;
+       int err = 0;
+
+       /* Don't offload route if using custom ip rules or if
+        * IPv4 FIB offloading has been disabled completely.
+        */
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+       if (fi->fib_net->ipv4.fib_has_custom_rules)
+               return 0;
+#endif
+
+       if (fi->fib_net->ipv4.fib_offload_disabled)
+               return 0;
+
+       dev = netdev_switch_get_dev_by_nhs(fi);
+       if (!dev)
+               return 0;
+       ops = dev->swdev_ops;
+
+       if (ops->swdev_fib_ipv4_add) {
+               err = ops->swdev_fib_ipv4_add(dev, htonl(dst), dst_len,
+                                             fi, tos, type, nlflags,
+                                             tb_id);
+               if (!err)
+                       fi->fib_flags |= RTNH_F_EXTERNAL;
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_add);
+
+/**
+ *     netdev_switch_fib_ipv4_del - Delete IPv4 route entry from switch
+ *
+ *     @dst: route's IPv4 destination address
+ *     @dst_len: destination address length (prefix length)
+ *     @fi: route FIB info structure
+ *     @tos: route TOS
+ *     @type: route type
+ *     @tb_id: route table ID
+ *
+ *     Delete IPv4 route entry from switch device.
+ */
+int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+                              u8 tos, u8 type, u32 tb_id)
+{
+       struct net_device *dev;
+       const struct swdev_ops *ops;
+       int err = 0;
+
+       if (!(fi->fib_flags & RTNH_F_EXTERNAL))
+               return 0;
+
+       dev = netdev_switch_get_dev_by_nhs(fi);
+       if (!dev)
+               return 0;
+       ops = dev->swdev_ops;
+
+       if (ops->swdev_fib_ipv4_del) {
+               err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len,
+                                             fi, tos, type, tb_id);
+               if (!err)
+                       fi->fib_flags &= ~RTNH_F_EXTERNAL;
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_del);
+
+/**
+ *     netdev_switch_fib_ipv4_abort - Abort an IPv4 FIB operation
+ *
+ *     @fi: route FIB info structure
+ */
+void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+{
+       /* There was a problem installing this route to the offload
+        * device.  For now, until we come up with more refined
+        * policy handling, abruptly end IPv4 fib offloading for
+        * for entire net by flushing offload device(s) of all
+        * IPv4 routes, and mark IPv4 fib offloading broken from
+        * this point forward.
+        */
+
+       fib_flush_external(fi->fib_net);
+       fi->fib_net->ipv4.fib_offload_disabled = true;
+}
+EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_abort);
index 91c8a8e031db718a067fa2ed4ef9f2924ddb7c25..c25a3a149dc4e6d50b20f2ba3ffc5a77d213fdde 100644 (file)
@@ -26,3 +26,11 @@ config TIPC_MEDIA_IB
        help
          Saying Y here will enable support for running TIPC on
          IP-over-InfiniBand devices.
+config TIPC_MEDIA_UDP
+       bool "IP/UDP media type support"
+       depends on TIPC
+       select NET_UDP_TUNNEL
+       help
+         Saying Y here will enable support for running TIPC over IP/UDP
+       bool
+       default y
index 599b1a540d2b0390db6ecbcc8f71aea9c9ad5ca9..57e460be46920cb270c0ed4eddb8d957462eb767 100644 (file)
@@ -10,5 +10,6 @@ tipc-y        += addr.o bcast.o bearer.o \
           netlink.o netlink_compat.o node.o socket.o eth_media.o \
           server.o socket.o
 
+tipc-$(CONFIG_TIPC_MEDIA_UDP)  += udp_media.o
 tipc-$(CONFIG_TIPC_MEDIA_IB)   += ib_media.o
 tipc-$(CONFIG_SYSCTL)          += sysctl.o
index 48fd3b5a73fbaf934178c444cbba07aa1a0f5b8c..ba7daa864d44471ad6c57d630bcaa4379aabea0c 100644 (file)
 #include "addr.h"
 #include "core.h"
 
+u32 tipc_own_addr(struct net *net)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return tn->own_addr;
+}
+
 /**
  * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
  */
index c700c2d28e09eb6921bb2a211f1cba50b8d08b82..7ba6d5c8ae405727721ba8ace46c7051938163ef 100644 (file)
@@ -55,6 +55,7 @@ static inline u32 tipc_cluster_mask(u32 addr)
        return addr & TIPC_CLUSTER_MASK;
 }
 
+u32 tipc_own_addr(struct net *net);
 int in_own_cluster(struct net *net, u32 addr);
 int in_own_cluster_exact(struct net *net, u32 addr);
 int in_own_node(struct net *net, u32 addr);
index 3e41704832de6debbe92d0a46e0cbbaa4701e981..c5cbdcb1f0b561a22d2f5537f5cca0a80ee36486 100644 (file)
@@ -62,21 +62,8 @@ static void tipc_bclink_lock(struct net *net)
 static void tipc_bclink_unlock(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_node *node = NULL;
 
-       if (likely(!tn->bclink->flags)) {
-               spin_unlock_bh(&tn->bclink->lock);
-               return;
-       }
-
-       if (tn->bclink->flags & TIPC_BCLINK_RESET) {
-               tn->bclink->flags &= ~TIPC_BCLINK_RESET;
-               node = tipc_bclink_retransmit_to(net);
-       }
        spin_unlock_bh(&tn->bclink->lock);
-
-       if (node)
-               tipc_link_reset_all(node);
 }
 
 void tipc_bclink_input(struct net *net)
@@ -91,13 +78,6 @@ uint  tipc_bclink_get_mtu(void)
        return MAX_PKT_DEFAULT_MCAST;
 }
 
-void tipc_bclink_set_flags(struct net *net, unsigned int flags)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       tn->bclink->flags |= flags;
-}
-
 static u32 bcbuf_acks(struct sk_buff *buf)
 {
        return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
@@ -135,9 +115,10 @@ static void bclink_set_last_sent(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *bcl = tn->bcl;
+       struct sk_buff *skb = skb_peek(&bcl->backlogq);
 
-       if (bcl->next_out)
-               bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
+       if (skb)
+               bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1);
        else
                bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
 }
@@ -155,7 +136,6 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
                                                seqno : node->bclink.last_sent;
 }
 
-
 /**
  * tipc_bclink_retransmit_to - get most recent node to request retransmission
  *
@@ -180,7 +160,7 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
        struct sk_buff *skb;
        struct tipc_link *bcl = tn->bcl;
 
-       skb_queue_walk(&bcl->outqueue, skb) {
+       skb_queue_walk(&bcl->transmq, skb) {
                if (more(buf_seqno(skb), after)) {
                        tipc_link_retransmit(bcl, skb, mod(to - after));
                        break;
@@ -210,14 +190,17 @@ void tipc_bclink_wakeup_users(struct net *net)
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 {
        struct sk_buff *skb, *tmp;
-       struct sk_buff *next;
        unsigned int released = 0;
        struct net *net = n_ptr->net;
        struct tipc_net *tn = net_generic(net, tipc_net_id);
 
+       if (unlikely(!n_ptr->bclink.recv_permitted))
+               return;
+
        tipc_bclink_lock(net);
+
        /* Bail out if tx queue is empty (no clean up is required) */
-       skb = skb_peek(&tn->bcl->outqueue);
+       skb = skb_peek(&tn->bcl->transmq);
        if (!skb)
                goto exit;
 
@@ -244,27 +227,19 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        }
 
        /* Skip over packets that node has previously acknowledged */
-       skb_queue_walk(&tn->bcl->outqueue, skb) {
+       skb_queue_walk(&tn->bcl->transmq, skb) {
                if (more(buf_seqno(skb), n_ptr->bclink.acked))
                        break;
        }
 
        /* Update packets that node is now acknowledging */
-       skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
+       skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
                if (more(buf_seqno(skb), acked))
                        break;
-
-               next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
-               if (skb != tn->bcl->next_out) {
-                       bcbuf_decr_acks(skb);
-               } else {
-                       bcbuf_set_acks(skb, 0);
-                       tn->bcl->next_out = next;
-                       bclink_set_last_sent(net);
-               }
-
+               bcbuf_decr_acks(skb);
+               bclink_set_last_sent(net);
                if (bcbuf_acks(skb) == 0) {
-                       __skb_unlink(skb, &tn->bcl->outqueue);
+                       __skb_unlink(skb, &tn->bcl->transmq);
                        kfree_skb(skb);
                        released = 1;
                }
@@ -272,7 +247,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        n_ptr->bclink.acked = acked;
 
        /* Try resolving broadcast link congestion, if necessary */
-       if (unlikely(tn->bcl->next_out)) {
+       if (unlikely(skb_peek(&tn->bcl->backlogq))) {
                tipc_link_push_packets(tn->bcl);
                bclink_set_last_sent(net);
        }
@@ -319,7 +294,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
        buf = tipc_buf_acquire(INT_H_SIZE);
        if (buf) {
                struct tipc_msg *msg = buf_msg(buf);
-               struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
+               struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
                u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
 
                tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
@@ -354,13 +329,12 @@ static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
                return;
 
        tipc_node_lock(n_ptr);
-
        if (n_ptr->bclink.recv_permitted &&
            (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
            (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
                n_ptr->bclink.oos_state = 2;
-
        tipc_node_unlock(n_ptr);
+       tipc_node_put(n_ptr);
 }
 
 /* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
@@ -387,14 +361,13 @@ int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
                __skb_queue_purge(list);
                return -EHOSTUNREACH;
        }
-
        /* Broadcast to all nodes */
        if (likely(bclink)) {
                tipc_bclink_lock(net);
                if (likely(bclink->bcast_nodes.count)) {
                        rc = __tipc_link_xmit(net, bcl, list);
                        if (likely(!rc)) {
-                               u32 len = skb_queue_len(&bcl->outqueue);
+                               u32 len = skb_queue_len(&bcl->transmq);
 
                                bclink_set_last_sent(net);
                                bcl->stats.queue_sz_counts++;
@@ -440,7 +413,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
         */
        if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
                tipc_link_proto_xmit(node->active_links[node->addr & 1],
-                                    STATE_MSG, 0, 0, 0, 0, 0);
+                                    STATE_MSG, 0, 0, 0, 0);
                tn->bcl->stats.sent_acks++;
        }
 }
@@ -481,17 +454,18 @@ void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
                        goto unlock;
                if (msg_destnode(msg) == tn->own_addr) {
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
-                       tipc_node_unlock(node);
                        tipc_bclink_lock(net);
                        bcl->stats.recv_nacks++;
                        tn->bclink->retransmit_to = node;
                        bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                        tipc_bclink_unlock(net);
+                       tipc_node_unlock(node);
                } else {
                        tipc_node_unlock(node);
                        bclink_peek_nack(net, msg);
                }
+               tipc_node_put(node);
                goto exit;
        }
 
@@ -528,11 +502,13 @@ receive:
                        tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
-                       tipc_buf_append(&node->bclink.reasm_buf, &buf);
-                       if (unlikely(!buf && !node->bclink.reasm_buf))
-                               goto unlock;
                        tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
+                       tipc_buf_append(&node->bclink.reasm_buf, &buf);
+                       if (unlikely(!buf && !node->bclink.reasm_buf)) {
+                               tipc_bclink_unlock(net);
+                               goto unlock;
+                       }
                        bcl->stats.recv_fragments++;
                        if (buf) {
                                bcl->stats.recv_fragmented++;
@@ -559,25 +535,25 @@ receive:
                if (node->bclink.last_in == node->bclink.last_sent)
                        goto unlock;
 
-               if (skb_queue_empty(&node->bclink.deferred_queue)) {
+               if (skb_queue_empty(&node->bclink.deferdq)) {
                        node->bclink.oos_state = 1;
                        goto unlock;
                }
 
-               msg = buf_msg(skb_peek(&node->bclink.deferred_queue));
+               msg = buf_msg(skb_peek(&node->bclink.deferdq));
                seqno = msg_seqno(msg);
                next_in = mod(next_in + 1);
                if (seqno != next_in)
                        goto unlock;
 
                /* Take in-sequence message from deferred queue & deliver it */
-               buf = __skb_dequeue(&node->bclink.deferred_queue);
+               buf = __skb_dequeue(&node->bclink.deferdq);
                goto receive;
        }
 
        /* Handle out-of-sequence broadcast message */
        if (less(next_in, seqno)) {
-               deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue,
+               deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
                                               buf);
                bclink_update_last_sent(node, seqno);
                buf = NULL;
@@ -594,6 +570,7 @@ receive:
 
 unlock:
        tipc_node_unlock(node);
+       tipc_node_put(node);
 exit:
        kfree_skb(buf);
 }
@@ -634,7 +611,6 @@ static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tn->net_id);
                tn->bcl->stats.sent_info++;
-
                if (WARN_ON(!bclink->bcast_nodes.count)) {
                        dump_stack();
                        return 0;
@@ -835,7 +811,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
        prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
        if (!prop)
                goto attr_msg_full;
-       if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0]))
+       if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
                goto prop_msg_full;
        nla_nest_end(msg->skb, prop);
 
@@ -913,8 +889,9 @@ int tipc_bclink_init(struct net *net)
        sprintf(bcbearer->media.name, "tipc-broadcast");
 
        spin_lock_init(&bclink->lock);
-       __skb_queue_head_init(&bcl->outqueue);
-       __skb_queue_head_init(&bcl->deferred_queue);
+       __skb_queue_head_init(&bcl->transmq);
+       __skb_queue_head_init(&bcl->backlogq);
+       __skb_queue_head_init(&bcl->deferdq);
        skb_queue_head_init(&bcl->wakeupq);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
@@ -922,7 +899,7 @@ int tipc_bclink_init(struct net *net)
        skb_queue_head_init(&bclink->inputq);
        bcl->owner = &bclink->node;
        bcl->owner->net = net;
-       bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
+       bcl->mtu = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->bearer_id = MAX_BEARERS;
        rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
index 43f397fbac55908c6312c96e3d71f8dd0223d414..4bdc12277d33ff8eb382b755dd1323424d9d43a2 100644 (file)
@@ -55,7 +55,6 @@ struct tipc_bcbearer_pair {
        struct tipc_bearer *secondary;
 };
 
-#define TIPC_BCLINK_RESET      1
 #define        BCBEARER                MAX_BEARERS
 
 /**
@@ -86,7 +85,6 @@ struct tipc_bcbearer {
  * @lock: spinlock governing access to structure
  * @link: (non-standard) broadcast link structure
  * @node: (non-standard) node structure representing b'cast link's peer node
- * @flags: represent bclink states
  * @bcast_nodes: map of broadcast-capable nodes
  * @retransmit_to: node that most recently requested a retransmit
  *
@@ -96,7 +94,6 @@ struct tipc_bclink {
        spinlock_t lock;
        struct tipc_link link;
        struct tipc_node node;
-       unsigned int flags;
        struct sk_buff_head arrvq;
        struct sk_buff_head inputq;
        struct tipc_node_map bcast_nodes;
@@ -117,7 +114,6 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
 
 int tipc_bclink_init(struct net *net);
 void tipc_bclink_stop(struct net *net);
-void tipc_bclink_set_flags(struct net *tn, unsigned int flags);
 void tipc_bclink_add_node(struct net *net, u32 addr);
 void tipc_bclink_remove_node(struct net *net, u32 addr);
 struct tipc_node *tipc_bclink_retransmit_to(struct net *tn);
index 48852c2dcc033585a733c73fc5646834f9d458e8..3613e72e858e2e259bd91455127e4ec0af5c903b 100644 (file)
@@ -47,6 +47,9 @@ static struct tipc_media * const media_info_array[] = {
        &eth_media_info,
 #ifdef CONFIG_TIPC_MEDIA_IB
        &ib_media_info,
+#endif
+#ifdef CONFIG_TIPC_MEDIA_UDP
+       &udp_media_info,
 #endif
        NULL
 };
@@ -216,7 +219,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
  * tipc_enable_bearer - enable bearer with the given name
  */
 static int tipc_enable_bearer(struct net *net, const char *name,
-                             u32 disc_domain, u32 priority)
+                             u32 disc_domain, u32 priority,
+                             struct nlattr *attr[])
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
@@ -304,7 +308,7 @@ restart:
 
        strcpy(b_ptr->name, name);
        b_ptr->media = m_ptr;
-       res = m_ptr->enable_media(net, b_ptr);
+       res = m_ptr->enable_media(net, b_ptr, attr);
        if (res) {
                pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
                        name, -res);
@@ -372,7 +376,8 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
        kfree_rcu(b_ptr, rcu);
 }
 
-int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b)
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
+                        struct nlattr *attr[])
 {
        struct net_device *dev;
        char *driver_name = strchr((const char *)b->name, ':') + 1;
@@ -791,7 +796,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
        }
 
        rtnl_lock();
-       err = tipc_enable_bearer(net, bearer, domain, prio);
+       err = tipc_enable_bearer(net, bearer, domain, prio, attrs);
        if (err) {
                rtnl_unlock();
                return err;
index 6b17795ff8bc309d70628c07566dc455e638614a..5cad243ee8fc646efccfe72f26ad49ebdfe42f4f 100644 (file)
@@ -41,7 +41,7 @@
 #include <net/genetlink.h>
 
 #define MAX_BEARERS    2
-#define MAX_MEDIA      2
+#define MAX_MEDIA      3
 #define MAX_NODES      4096
 #define WSIZE          32
 
  * - the field's actual content and length is defined per media
  * - remaining unused bytes in the field are set to zero
  */
-#define TIPC_MEDIA_ADDR_SIZE   32
+#define TIPC_MEDIA_INFO_SIZE   32
 #define TIPC_MEDIA_TYPE_OFFSET 3
+#define TIPC_MEDIA_ADDR_OFFSET 4
 
 /*
  * Identifiers of supported TIPC media types
  */
 #define TIPC_MEDIA_TYPE_ETH    1
 #define TIPC_MEDIA_TYPE_IB     2
+#define TIPC_MEDIA_TYPE_UDP    3
 
 /**
  * struct tipc_node_map - set of node identifiers
@@ -76,7 +78,7 @@ struct tipc_node_map {
  * @broadcast: non-zero if address is a broadcast address
  */
 struct tipc_media_addr {
-       u8 value[TIPC_MEDIA_ADDR_SIZE];
+       u8 value[TIPC_MEDIA_INFO_SIZE];
        u8 media_id;
        u8 broadcast;
 };
@@ -103,7 +105,8 @@ struct tipc_media {
        int (*send_msg)(struct net *net, struct sk_buff *buf,
                        struct tipc_bearer *b_ptr,
                        struct tipc_media_addr *dest);
-       int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr);
+       int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr,
+                           struct nlattr *attr[]);
        void (*disable_media)(struct tipc_bearer *b_ptr);
        int (*addr2str)(struct tipc_media_addr *addr,
                        char *strbuf,
@@ -182,6 +185,9 @@ extern struct tipc_media eth_media_info;
 #ifdef CONFIG_TIPC_MEDIA_IB
 extern struct tipc_media ib_media_info;
 #endif
+#ifdef CONFIG_TIPC_MEDIA_UDP
+extern struct tipc_media udp_media_info;
+#endif
 
 int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
@@ -196,7 +202,8 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
 int tipc_media_set_priority(const char *name, u32 new_value);
 int tipc_media_set_window(const char *name, u32 new_value);
 void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
-int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b);
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
+                        struct nlattr *attrs[]);
 void tipc_disable_l2_media(struct tipc_bearer *b);
 int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
                     struct tipc_bearer *b, struct tipc_media_addr *dest);
index feef3753615d24f9067f859fcf7b0476b2708775..967e292f53c89182bc0ed128b1dacd51fe02d090 100644 (file)
@@ -86,9 +86,10 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
 
        msg = buf_msg(buf);
        tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type,
-                     INT_H_SIZE, dest_domain);
+                     MAX_H_SIZE, dest_domain);
        msg_set_non_seq(msg, 1);
        msg_set_node_sig(msg, tn->random);
+       msg_set_node_capabilities(msg, 0);
        msg_set_dest_domain(msg, dest_domain);
        msg_set_bc_netid(msg, tn->net_id);
        b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
@@ -133,6 +134,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
        u32 net_id = msg_bc_netid(msg);
        u32 mtyp = msg_type(msg);
        u32 signature = msg_node_sig(msg);
+       u16 caps = msg_node_capabilities(msg);
        bool addr_match = false;
        bool sign_match = false;
        bool link_up = false;
@@ -167,6 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
        if (!node)
                return;
        tipc_node_lock(node);
+       node->capabilities = caps;
        link = node->links[bearer->identity];
 
        /* Prepare to validate requesting node's signature and media address */
@@ -249,7 +252,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
 
        /* Send response, if necessary */
        if (respond && (mtyp == DSC_REQ_MSG)) {
-               rbuf = tipc_buf_acquire(INT_H_SIZE);
+               rbuf = tipc_buf_acquire(MAX_H_SIZE);
                if (rbuf) {
                        tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
                        tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
@@ -257,6 +260,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
                }
        }
        tipc_node_unlock(node);
+       tipc_node_put(node);
 }
 
 /**
@@ -359,8 +363,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
        req = kmalloc(sizeof(*req), GFP_ATOMIC);
        if (!req)
                return -ENOMEM;
-
-       req->buf = tipc_buf_acquire(INT_H_SIZE);
+       req->buf = tipc_buf_acquire(MAX_H_SIZE);
        if (!req->buf) {
                kfree(req);
                return -ENOMEM;
index 5e1426f1751f146cf3350983e9d0c04d218da850..f69a2fde9f4a065472f1a14aeee22c6380e07fa4 100644 (file)
@@ -37,8 +37,6 @@
 #include "core.h"
 #include "bearer.h"
 
-#define ETH_ADDR_OFFSET  4  /* MAC addr position inside address field */
-
 /* Convert Ethernet address (media address format) to string */
 static int tipc_eth_addr2str(struct tipc_media_addr *addr,
                             char *strbuf, int bufsz)
@@ -53,9 +51,9 @@ static int tipc_eth_addr2str(struct tipc_media_addr *addr,
 /* Convert from media address format to discovery message addr format */
 static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
 {
-       memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
+       memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
        msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
-       memcpy(msg + ETH_ADDR_OFFSET, addr->value, ETH_ALEN);
+       memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, addr->value, ETH_ALEN);
        return 0;
 }
 
@@ -79,7 +77,7 @@ static int tipc_eth_msg2addr(struct tipc_bearer *b,
                             char *msg)
 {
        /* Skip past preamble: */
-       msg += ETH_ADDR_OFFSET;
+       msg += TIPC_MEDIA_ADDR_OFFSET;
        return tipc_eth_raw2addr(b, addr, msg);
 }
 
index 8522eef9c136bc25d39e166b32dfc459881d77c9..e8c16718e3faea3bf4983f12f2f5fc5c289175eb 100644 (file)
@@ -57,7 +57,7 @@ static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
 /* Convert from media address format to discovery message addr format */
 static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr)
 {
-       memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
+       memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
        memcpy(msg, addr->value, INFINIBAND_ALEN);
        return 0;
 }
index 14f09b3cb87c2fd9c87c67dfb67ce5e8df7d9f0f..a6b30df6ec02ec22f1b4b44930bd1ceb168258f3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/link.c: TIPC link code
  *
- * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
+ * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
  * All rights reserved.
  *
@@ -35,6 +35,7 @@
  */
 
 #include "core.h"
+#include "subscr.h"
 #include "link.h"
 #include "bcast.h"
 #include "socket.h"
@@ -88,24 +89,14 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
 #define  TIMEOUT_EVT     560817u       /* link timer expired */
 
 /*
- * The following two 'message types' is really just implementation
- * data conveniently stored in the message header.
- * They must not be considered part of the protocol
+ * State value stored in 'failover_pkts'
  */
-#define OPEN_MSG   0
-#define CLOSED_MSG 1
-
-/*
- * State value stored in 'exp_msg_count'
- */
-#define START_CHANGEOVER 100000u
+#define FIRST_FAILOVER 0xffffu
 
 static void link_handle_out_of_seq_msg(struct tipc_link *link,
                                       struct sk_buff *skb);
 static void tipc_link_proto_rcv(struct tipc_link *link,
                                struct sk_buff *skb);
-static int  tipc_link_tunnel_rcv(struct tipc_node *node,
-                                struct sk_buff **skb);
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
@@ -114,7 +105,7 @@ static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-
+static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
 /*
  *  Simple link routines
  */
@@ -138,32 +129,11 @@ static void tipc_link_put(struct tipc_link *l_ptr)
        kref_put(&l_ptr->ref, tipc_link_release);
 }
 
-static void link_init_max_pkt(struct tipc_link *l_ptr)
+static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
 {
-       struct tipc_node *node = l_ptr->owner;
-       struct tipc_net *tn = net_generic(node->net, tipc_net_id);
-       struct tipc_bearer *b_ptr;
-       u32 max_pkt;
-
-       rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
-       if (!b_ptr) {
-               rcu_read_unlock();
-               return;
-       }
-       max_pkt = (b_ptr->mtu & ~3);
-       rcu_read_unlock();
-
-       if (max_pkt > MAX_MSG_SIZE)
-               max_pkt = MAX_MSG_SIZE;
-
-       l_ptr->max_pkt_target = max_pkt;
-       if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
-               l_ptr->max_pkt = l_ptr->max_pkt_target;
-       else
-               l_ptr->max_pkt = MAX_PKT_DEFAULT;
-
-       l_ptr->max_pkt_probes = 0;
+       if (l->owner->active_links[0] != l)
+               return l->owner->active_links[0];
+       return l->owner->active_links[1];
 }
 
 /*
@@ -194,10 +164,10 @@ static void link_timeout(unsigned long data)
        tipc_node_lock(l_ptr->owner);
 
        /* update counters used in statistical profiling of send traffic */
-       l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
+       l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
        l_ptr->stats.queue_sz_counts++;
 
-       skb = skb_peek(&l_ptr->outqueue);
+       skb = skb_peek(&l_ptr->transmq);
        if (skb) {
                struct tipc_msg *msg = buf_msg(skb);
                u32 length = msg_size(msg);
@@ -229,7 +199,7 @@ static void link_timeout(unsigned long data)
        /* do all other link processing performed on a periodic basis */
        link_state_event(l_ptr, TIMEOUT_EVT);
 
-       if (l_ptr->next_out)
+       if (skb_queue_len(&l_ptr->backlogq))
                tipc_link_push_packets(l_ptr);
 
        tipc_node_unlock(l_ptr->owner);
@@ -305,16 +275,15 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
        msg_set_session(msg, (tn->random & 0xffff));
        msg_set_bearer_id(msg, b_ptr->identity);
        strcpy((char *)msg_data(msg), if_name);
-
+       l_ptr->net_plane = b_ptr->net_plane;
+       l_ptr->advertised_mtu = b_ptr->mtu;
+       l_ptr->mtu = l_ptr->advertised_mtu;
        l_ptr->priority = b_ptr->priority;
        tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-
-       l_ptr->net_plane = b_ptr->net_plane;
-       link_init_max_pkt(l_ptr);
-
        l_ptr->next_out_no = 1;
-       __skb_queue_head_init(&l_ptr->outqueue);
-       __skb_queue_head_init(&l_ptr->deferred_queue);
+       __skb_queue_head_init(&l_ptr->transmq);
+       __skb_queue_head_init(&l_ptr->backlogq);
+       __skb_queue_head_init(&l_ptr->deferdq);
        skb_queue_head_init(&l_ptr->wakeupq);
        skb_queue_head_init(&l_ptr->inputq);
        skb_queue_head_init(&l_ptr->namedq);
@@ -327,15 +296,19 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 }
 
 /**
- * link_delete - Conditional deletion of link.
- *               If timer still running, real delete is done when it expires
- * @link: link to be deleted
+ * tipc_link_delete - Delete a link
+ * @l: link to be deleted
  */
-void tipc_link_delete(struct tipc_link *link)
+void tipc_link_delete(struct tipc_link *l)
 {
-       tipc_link_reset_fragments(link);
-       tipc_node_detach_link(link->owner, link);
-       tipc_link_put(link);
+       tipc_link_reset(l);
+       if (del_timer(&l->timer))
+               tipc_link_put(l);
+       l->flags |= LINK_STOPPED;
+       /* Delete link now, or when timer is finished: */
+       tipc_link_reset_fragments(l);
+       tipc_node_detach_link(l->owner, l);
+       tipc_link_put(l);
 }
 
 void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
@@ -349,16 +322,7 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
        list_for_each_entry_rcu(node, &tn->node_list, list) {
                tipc_node_lock(node);
                link = node->links[bearer_id];
-               if (!link) {
-                       tipc_node_unlock(node);
-                       continue;
-               }
-               tipc_link_reset(link);
-               if (del_timer(&link->timer))
-                       tipc_link_put(link);
-               link->flags |= LINK_STOPPED;
-               /* Delete link now, or when failover is finished: */
-               if (shutting_down || !tipc_node_is_up(node))
+               if (link)
                        tipc_link_delete(link);
                tipc_node_unlock(node);
        }
@@ -366,28 +330,43 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
 }
 
 /**
- * link_schedule_user - schedule user for wakeup after congestion
+ * link_schedule_user - schedule a message sender for wakeup after congestion
  * @link: congested link
- * @oport: sending port
- * @chain_sz: size of buffer chain that was attempted sent
- * @imp: importance of message attempted sent
+ * @list: message that was attempted sent
  * Create pseudo msg to send back to user when congestion abates
+ * Only consumes message if there is an error
  */
-static bool link_schedule_user(struct tipc_link *link, u32 oport,
-                              uint chain_sz, uint imp)
+static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
 {
-       struct sk_buff *buf;
+       struct tipc_msg *msg = buf_msg(skb_peek(list));
+       int imp = msg_importance(msg);
+       u32 oport = msg_origport(msg);
+       u32 addr = link_own_addr(link);
+       struct sk_buff *skb;
 
-       buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
-                             link_own_addr(link), link_own_addr(link),
-                             oport, 0, 0);
-       if (!buf)
-               return false;
-       TIPC_SKB_CB(buf)->chain_sz = chain_sz;
-       TIPC_SKB_CB(buf)->chain_imp = imp;
-       skb_queue_tail(&link->wakeupq, buf);
+       /* This really cannot happen...  */
+       if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
+               pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
+               tipc_link_reset(link);
+               goto err;
+       }
+       /* Non-blocking sender: */
+       if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
+               return -ELINKCONG;
+
+       /* Create and schedule wakeup pseudo message */
+       skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
+                             addr, addr, oport, 0, 0);
+       if (!skb)
+               goto err;
+       TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
+       TIPC_SKB_CB(skb)->chain_imp = imp;
+       skb_queue_tail(&link->wakeupq, skb);
        link->stats.link_congs++;
-       return true;
+       return -ELINKCONG;
+err:
+       __skb_queue_purge(list);
+       return -ENOBUFS;
 }
 
 /**
@@ -396,19 +375,22 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
  * Move a number of waiting users, as permitted by available space in
  * the send queue, from link wait queue to node wait queue for wakeup
  */
-void link_prepare_wakeup(struct tipc_link *link)
+void link_prepare_wakeup(struct tipc_link *l)
 {
-       uint pend_qsz = skb_queue_len(&link->outqueue);
+       int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
+       int imp, lim;
        struct sk_buff *skb, *tmp;
 
-       skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
-               if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
+       skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
+               imp = TIPC_SKB_CB(skb)->chain_imp;
+               lim = l->window + l->backlog[imp].limit;
+               pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
+               if ((pnd[imp] + l->backlog[imp].len) >= lim)
                        break;
-               pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
-               skb_unlink(skb, &link->wakeupq);
-               skb_queue_tail(&link->inputq, skb);
-               link->owner->inputq = &link->inputq;
-               link->owner->action_flags |= TIPC_MSG_EVT;
+               skb_unlink(skb, &l->wakeupq);
+               skb_queue_tail(&l->inputq, skb);
+               l->owner->inputq = &l->inputq;
+               l->owner->action_flags |= TIPC_MSG_EVT;
        }
 }
 
@@ -422,31 +404,42 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
        l_ptr->reasm_buf = NULL;
 }
 
+static void tipc_link_purge_backlog(struct tipc_link *l)
+{
+       __skb_queue_purge(&l->backlogq);
+       l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+       l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+       l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+       l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+       l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
+}
+
 /**
  * tipc_link_purge_queues - purge all pkt queues associated with link
  * @l_ptr: pointer to link
  */
 void tipc_link_purge_queues(struct tipc_link *l_ptr)
 {
-       __skb_queue_purge(&l_ptr->deferred_queue);
-       __skb_queue_purge(&l_ptr->outqueue);
+       __skb_queue_purge(&l_ptr->deferdq);
+       __skb_queue_purge(&l_ptr->transmq);
+       tipc_link_purge_backlog(l_ptr);
        tipc_link_reset_fragments(l_ptr);
 }
 
 void tipc_link_reset(struct tipc_link *l_ptr)
 {
        u32 prev_state = l_ptr->state;
-       u32 checkpoint = l_ptr->next_in_no;
        int was_active_link = tipc_link_is_active(l_ptr);
        struct tipc_node *owner = l_ptr->owner;
+       struct tipc_link *pl = tipc_parallel_link(l_ptr);
 
        msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
 
        /* Link is down, accept any session */
        l_ptr->peer_session = INVALID_SESSION;
 
-       /* Prepare for max packet size negotiation */
-       link_init_max_pkt(l_ptr);
+       /* Prepare for renewed mtu size negotiation */
+       l_ptr->mtu = l_ptr->advertised_mtu;
 
        l_ptr->state = RESET_UNKNOWN;
 
@@ -456,21 +449,26 @@ void tipc_link_reset(struct tipc_link *l_ptr)
        tipc_node_link_down(l_ptr->owner, l_ptr);
        tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
 
-       if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
-               l_ptr->reset_checkpoint = checkpoint;
-               l_ptr->exp_msg_count = START_CHANGEOVER;
+       if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
+               l_ptr->flags |= LINK_FAILINGOVER;
+               l_ptr->failover_checkpt = l_ptr->next_in_no;
+               pl->failover_pkts = FIRST_FAILOVER;
+               pl->failover_checkpt = l_ptr->next_in_no;
+               pl->failover_skb = l_ptr->reasm_buf;
+       } else {
+               kfree_skb(l_ptr->reasm_buf);
        }
-
        /* Clean up all queues, except inputq: */
-       __skb_queue_purge(&l_ptr->outqueue);
-       __skb_queue_purge(&l_ptr->deferred_queue);
+       __skb_queue_purge(&l_ptr->transmq);
+       __skb_queue_purge(&l_ptr->deferdq);
        if (!owner->inputq)
                owner->inputq = &l_ptr->inputq;
        skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
        if (!skb_queue_empty(owner->inputq))
                owner->action_flags |= TIPC_MSG_EVT;
-       l_ptr->next_out = NULL;
-       l_ptr->unacked_window = 0;
+       tipc_link_purge_backlog(l_ptr);
+       l_ptr->reasm_buf = NULL;
+       l_ptr->rcv_unacked = 0;
        l_ptr->checkpoint = 1;
        l_ptr->next_out_no = 1;
        l_ptr->fsm_msg_cnt = 0;
@@ -521,8 +519,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
        if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
                return;         /* Not yet. */
 
-       /* Check whether changeover is going on */
-       if (l_ptr->exp_msg_count) {
+       if (l_ptr->flags & LINK_FAILINGOVER) {
                if (event == TIMEOUT_EVT)
                        link_set_timer(l_ptr, cont_intv);
                return;
@@ -539,11 +536,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                                l_ptr->checkpoint = l_ptr->next_in_no;
                                if (tipc_bclink_acks_missing(l_ptr->owner)) {
                                        tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                            0, 0, 0, 0, 0);
-                                       l_ptr->fsm_msg_cnt++;
-                               } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
-                                       tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                            1, 0, 0, 0, 0);
+                                                            0, 0, 0, 0);
                                        l_ptr->fsm_msg_cnt++;
                                }
                                link_set_timer(l_ptr, cont_intv);
@@ -551,7 +544,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        }
                        l_ptr->state = WORKING_UNKNOWN;
                        l_ptr->fsm_msg_cnt = 0;
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv / 4);
                        break;
@@ -562,7 +555,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0, 0);
+                                            0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -585,7 +578,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0, 0);
+                                            0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -596,13 +589,13 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                                l_ptr->checkpoint = l_ptr->next_in_no;
                                if (tipc_bclink_acks_missing(l_ptr->owner)) {
                                        tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                            0, 0, 0, 0, 0);
+                                                            0, 0, 0, 0);
                                        l_ptr->fsm_msg_cnt++;
                                }
                                link_set_timer(l_ptr, cont_intv);
                        } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
                                tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                    1, 0, 0, 0, 0);
+                                                    1, 0, 0, 0);
                                l_ptr->fsm_msg_cnt++;
                                link_set_timer(l_ptr, cont_intv / 4);
                        } else {        /* Link has failed */
@@ -612,7 +605,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                                l_ptr->state = RESET_UNKNOWN;
                                l_ptr->fsm_msg_cnt = 0;
                                tipc_link_proto_xmit(l_ptr, RESET_MSG,
-                                                    0, 0, 0, 0, 0);
+                                                    0, 0, 0, 0);
                                l_ptr->fsm_msg_cnt++;
                                link_set_timer(l_ptr, cont_intv);
                        }
@@ -632,7 +625,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = WORKING_WORKING;
                        l_ptr->fsm_msg_cnt = 0;
                        link_activate(l_ptr);
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        if (l_ptr->owner->working_links == 1)
                                tipc_link_sync_xmit(l_ptr);
@@ -642,7 +635,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            1, 0, 0, 0, 0);
+                                            1, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -652,7 +645,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        link_set_timer(l_ptr, cont_intv);
                        break;
                case TIMEOUT_EVT:
-                       tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -670,7 +663,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = WORKING_WORKING;
                        l_ptr->fsm_msg_cnt = 0;
                        link_activate(l_ptr);
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        if (l_ptr->owner->working_links == 1)
                                tipc_link_sync_xmit(l_ptr);
@@ -680,7 +673,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        break;
                case TIMEOUT_EVT:
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0, 0);
+                                            0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -693,101 +686,65 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
        }
 }
 
-/* tipc_link_cong: determine return value and how to treat the
- * sent buffer during link congestion.
- * - For plain, errorless user data messages we keep the buffer and
- *   return -ELINKONG.
- * - For all other messages we discard the buffer and return -EHOSTUNREACH
- * - For TIPC internal messages we also reset the link
- */
-static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
-{
-       struct sk_buff *skb = skb_peek(list);
-       struct tipc_msg *msg = buf_msg(skb);
-       uint imp = tipc_msg_tot_importance(msg);
-       u32 oport = msg_tot_origport(msg);
-
-       if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
-               pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
-               tipc_link_reset(link);
-               goto drop;
-       }
-       if (unlikely(msg_errcode(msg)))
-               goto drop;
-       if (unlikely(msg_reroute_cnt(msg)))
-               goto drop;
-       if (TIPC_SKB_CB(skb)->wakeup_pending)
-               return -ELINKCONG;
-       if (link_schedule_user(link, oport, skb_queue_len(list), imp))
-               return -ELINKCONG;
-drop:
-       __skb_queue_purge(list);
-       return -EHOSTUNREACH;
-}
-
 /**
  * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
  * @link: link to use
  * @list: chain of buffers containing message
  *
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
- * user data messages) or -EHOSTUNREACH (all other messages/senders)
- * Only the socket functions tipc_send_stream() and tipc_send_packet() need
- * to act on the return value, since they may need to do more send attempts.
+ * Consumes the buffer chain, except when returning -ELINKCONG,
+ * since the caller then may want to make more send attempts.
+ * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
                     struct sk_buff_head *list)
 {
        struct tipc_msg *msg = buf_msg(skb_peek(list));
-       uint psz = msg_size(msg);
-       uint sndlim = link->queue_limit[0];
-       uint imp = tipc_msg_tot_importance(msg);
-       uint mtu = link->max_pkt;
+       unsigned int maxwin = link->window;
+       unsigned int imp = msg_importance(msg);
+       uint mtu = link->mtu;
        uint ack = mod(link->next_in_no - 1);
        uint seqno = link->next_out_no;
        uint bc_last_in = link->owner->bclink.last_in;
        struct tipc_media_addr *addr = &link->media_addr;
-       struct sk_buff_head *outqueue = &link->outqueue;
+       struct sk_buff_head *transmq = &link->transmq;
+       struct sk_buff_head *backlogq = &link->backlogq;
        struct sk_buff *skb, *tmp;
 
-       /* Match queue limits against msg importance: */
-       if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
-               return tipc_link_cong(link, list);
+       /* Match backlog limit against msg importance: */
+       if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
+               return link_schedule_user(link, list);
 
-       /* Has valid packet limit been used ? */
-       if (unlikely(psz > mtu)) {
+       if (unlikely(msg_size(msg) > mtu)) {
                __skb_queue_purge(list);
                return -EMSGSIZE;
        }
-
-       /* Prepare each packet for sending, and add to outqueue: */
+       /* Prepare each packet for sending, and add to relevant queue: */
        skb_queue_walk_safe(list, skb, tmp) {
                __skb_unlink(skb, list);
                msg = buf_msg(skb);
-               msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
+               msg_set_seqno(msg, seqno);
+               msg_set_ack(msg, ack);
                msg_set_bcast_ack(msg, bc_last_in);
 
-               if (skb_queue_len(outqueue) < sndlim) {
-                       __skb_queue_tail(outqueue, skb);
-                       tipc_bearer_send(net, link->bearer_id,
-                                        skb, addr);
-                       link->next_out = NULL;
-                       link->unacked_window = 0;
-               } else if (tipc_msg_bundle(outqueue, skb, mtu)) {
+               if (likely(skb_queue_len(transmq) < maxwin)) {
+                       __skb_queue_tail(transmq, skb);
+                       tipc_bearer_send(net, link->bearer_id, skb, addr);
+                       link->rcv_unacked = 0;
+                       seqno++;
+                       continue;
+               }
+               if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
                        link->stats.sent_bundled++;
                        continue;
-               } else if (tipc_msg_make_bundle(outqueue, skb, mtu,
-                                               link->addr)) {
+               }
+               if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
                        link->stats.sent_bundled++;
                        link->stats.sent_bundles++;
-                       if (!link->next_out)
-                               link->next_out = skb_peek_tail(outqueue);
-               } else {
-                       __skb_queue_tail(outqueue, skb);
-                       if (!link->next_out)
-                               link->next_out = skb;
+                       imp = msg_importance(buf_msg(skb));
                }
+               __skb_queue_tail(backlogq, skb);
+               link->backlog[imp].len++;
                seqno++;
        }
        link->next_out_no = seqno;
@@ -808,13 +765,25 @@ static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
        return __tipc_link_xmit(link->owner->net, link, &head);
 }
 
+/* tipc_link_xmit_skb(): send single buffer to destination
+ * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ * TODO: Return real return value, and let callers use
+ * tipc_wait_for_sendpkt() where applicable
+ */
 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
                       u32 selector)
 {
        struct sk_buff_head head;
+       int rc;
 
        skb2list(skb, &head);
-       return tipc_link_xmit(net, &head, dnode, selector);
+       rc = tipc_link_xmit(net, &head, dnode, selector);
+       if (rc == -ELINKCONG)
+               kfree_skb(skb);
+       return 0;
 }
 
 /**
@@ -841,12 +810,15 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
                if (link)
                        rc = __tipc_link_xmit(net, link, list);
                tipc_node_unlock(node);
+               tipc_node_put(node);
        }
        if (link)
                return rc;
 
-       if (likely(in_own_node(net, dnode)))
-               return tipc_sk_rcv(net, list);
+       if (likely(in_own_node(net, dnode))) {
+               tipc_sk_rcv(net, list);
+               return 0;
+       }
 
        __skb_queue_purge(list);
        return rc;
@@ -893,14 +865,6 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
        kfree_skb(buf);
 }
 
-struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
-                                   const struct sk_buff *skb)
-{
-       if (skb_queue_is_last(list, skb))
-               return NULL;
-       return skb->next;
-}
-
 /*
  * tipc_link_push_packets - push unsent packets to bearer
  *
@@ -909,30 +873,24 @@ struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
  *
  * Called with node locked
  */
-void tipc_link_push_packets(struct tipc_link *l_ptr)
+void tipc_link_push_packets(struct tipc_link *link)
 {
-       struct sk_buff_head *outqueue = &l_ptr->outqueue;
-       struct sk_buff *skb = l_ptr->next_out;
+       struct sk_buff *skb;
        struct tipc_msg *msg;
-       u32 next, first;
+       unsigned int ack = mod(link->next_in_no - 1);
 
-       skb_queue_walk_from(outqueue, skb) {
-               msg = buf_msg(skb);
-               next = msg_seqno(msg);
-               first = buf_seqno(skb_peek(outqueue));
-
-               if (mod(next - first) < l_ptr->queue_limit[0]) {
-                       msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
-                       msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-                       if (msg_user(msg) == MSG_BUNDLER)
-                               TIPC_SKB_CB(skb)->bundling = false;
-                       tipc_bearer_send(l_ptr->owner->net,
-                                        l_ptr->bearer_id, skb,
-                                        &l_ptr->media_addr);
-                       l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
-               } else {
+       while (skb_queue_len(&link->transmq) < link->window) {
+               skb = __skb_dequeue(&link->backlogq);
+               if (!skb)
                        break;
-               }
+               msg = buf_msg(skb);
+               link->backlog[msg_importance(msg)].len--;
+               msg_set_ack(msg, ack);
+               msg_set_bcast_ack(msg, link->owner->bclink.last_in);
+               link->rcv_unacked = 0;
+               __skb_queue_tail(&link->transmq, skb);
+               tipc_bearer_send(link->owner->net, link->bearer_id,
+                                skb, &link->media_addr);
        }
 }
 
@@ -979,7 +937,6 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
                        (unsigned long) TIPC_SKB_CB(buf)->handle);
 
                n_ptr = tipc_bclink_retransmit_to(net);
-               tipc_node_lock(n_ptr);
 
                tipc_addr_string_fill(addr_string, n_ptr->addr);
                pr_info("Broadcast link info for %s\n", addr_string);
@@ -991,9 +948,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
                        n_ptr->bclink.oos_state,
                        n_ptr->bclink.last_sent);
 
-               tipc_node_unlock(n_ptr);
-
-               tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
+               n_ptr->action_flags |= TIPC_BCAST_RESET;
                l_ptr->stale_count = 0;
        }
 }
@@ -1019,8 +974,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
                l_ptr->stale_count = 1;
        }
 
-       skb_queue_walk_from(&l_ptr->outqueue, skb) {
-               if (!retransmits || skb == l_ptr->next_out)
+       skb_queue_walk_from(&l_ptr->transmq, skb) {
+               if (!retransmits)
                        break;
                msg = buf_msg(skb);
                msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
@@ -1032,72 +987,43 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
        }
 }
 
-static void link_retrieve_defq(struct tipc_link *link,
-                              struct sk_buff_head *list)
-{
-       u32 seq_no;
-
-       if (skb_queue_empty(&link->deferred_queue))
-               return;
-
-       seq_no = buf_seqno(skb_peek(&link->deferred_queue));
-       if (seq_no == mod(link->next_in_no))
-               skb_queue_splice_tail_init(&link->deferred_queue, list);
-}
-
-/**
- * link_recv_buf_validate - validate basic format of received message
- *
- * This routine ensures a TIPC message has an acceptable header, and at least
- * as much data as the header indicates it should.  The routine also ensures
- * that the entire message header is stored in the main fragment of the message
- * buffer, to simplify future access to message header fields.
- *
- * Note: Having extra info present in the message header or data areas is OK.
- * TIPC will ignore the excess, under the assumption that it is optional info
- * introduced by a later release of the protocol.
+/* link_synch(): check if all packets arrived before the synch
+ *               point have been consumed
+ * Returns true if the parallel links are synched, otherwise false
  */
-static int link_recv_buf_validate(struct sk_buff *buf)
+static bool link_synch(struct tipc_link *l)
 {
-       static u32 min_data_hdr_size[8] = {
-               SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
-               MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
-               };
+       unsigned int post_synch;
+       struct tipc_link *pl;
 
-       struct tipc_msg *msg;
-       u32 tipc_hdr[2];
-       u32 size;
-       u32 hdr_size;
-       u32 min_hdr_size;
+       pl  = tipc_parallel_link(l);
+       if (pl == l)
+               goto synched;
 
-       /* If this packet comes from the defer queue, the skb has already
-        * been validated
-        */
-       if (unlikely(TIPC_SKB_CB(buf)->deferred))
-               return 1;
-
-       if (unlikely(buf->len < MIN_H_SIZE))
-               return 0;
-
-       msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
-       if (msg == NULL)
-               return 0;
+       /* Was last pre-synch packet added to input queue ? */
+       if (less_eq(pl->next_in_no, l->synch_point))
+               return false;
 
-       if (unlikely(msg_version(msg) != TIPC_VERSION))
-               return 0;
+       /* Is it still in the input queue ? */
+       post_synch = mod(pl->next_in_no - l->synch_point) - 1;
+       if (skb_queue_len(&pl->inputq) > post_synch)
+               return false;
+synched:
+       l->flags &= ~LINK_SYNCHING;
+       return true;
+}
 
-       size = msg_size(msg);
-       hdr_size = msg_hdr_sz(msg);
-       min_hdr_size = msg_isdata(msg) ?
-               min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
+static void link_retrieve_defq(struct tipc_link *link,
+                              struct sk_buff_head *list)
+{
+       u32 seq_no;
 
-       if (unlikely((hdr_size < min_hdr_size) ||
-                    (size < hdr_size) ||
-                    (buf->len < size) ||
-                    (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
-               return 0;
+       if (skb_queue_empty(&link->deferdq))
+               return;
 
-       return pskb_may_pull(buf, hdr_size);
+       seq_no = buf_seqno(skb_peek(&link->deferdq));
+       if (seq_no == mod(link->next_in_no))
+               skb_queue_splice_tail_init(&link->deferdq, list);
 }
 
 /**
@@ -1125,16 +1051,11 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
 
        while ((skb = __skb_dequeue(&head))) {
                /* Ensure message is well-formed */
-               if (unlikely(!link_recv_buf_validate(skb)))
-                       goto discard;
-
-               /* Ensure message data is a single contiguous unit */
-               if (unlikely(skb_linearize(skb)))
+               if (unlikely(!tipc_msg_validate(skb)))
                        goto discard;
 
                /* Handle arrival of a non-unicast link message */
                msg = buf_msg(skb);
-
                if (unlikely(msg_non_seq(msg))) {
                        if (msg_user(msg) ==  LINK_CONFIG)
                                tipc_disc_rcv(net, skb, b_ptr);
@@ -1152,8 +1073,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
                n_ptr = tipc_node_find(net, msg_prevnode(msg));
                if (unlikely(!n_ptr))
                        goto discard;
-               tipc_node_lock(n_ptr);
 
+               tipc_node_lock(n_ptr);
                /* Locate unicast link endpoint that should handle message */
                l_ptr = n_ptr->links[b_ptr->identity];
                if (unlikely(!l_ptr))
@@ -1175,21 +1096,20 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
                ackd = msg_ack(msg);
 
                /* Release acked messages */
-               if (n_ptr->bclink.recv_permitted)
+               if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
                        tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
 
                released = 0;
-               skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
-                       if (skb1 == l_ptr->next_out ||
-                           more(buf_seqno(skb1), ackd))
+               skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
+                       if (more(buf_seqno(skb1), ackd))
                                break;
-                        __skb_unlink(skb1, &l_ptr->outqueue);
+                        __skb_unlink(skb1, &l_ptr->transmq);
                         kfree_skb(skb1);
                         released = 1;
                }
 
                /* Try sending any messages link endpoint has pending */
-               if (unlikely(l_ptr->next_out))
+               if (unlikely(skb_queue_len(&l_ptr->backlogq)))
                        tipc_link_push_packets(l_ptr);
 
                if (released && !skb_queue_empty(&l_ptr->wakeupq))
@@ -1223,18 +1143,26 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
                        skb = NULL;
                        goto unlock;
                }
+               /* Synchronize with parallel link if applicable */
+               if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
+                       link_handle_out_of_seq_msg(l_ptr, skb);
+                       if (link_synch(l_ptr))
+                               link_retrieve_defq(l_ptr, &head);
+                       skb = NULL;
+                       goto unlock;
+               }
                l_ptr->next_in_no++;
-               if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
+               if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
                        link_retrieve_defq(l_ptr, &head);
-
-               if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
+               if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
                        l_ptr->stats.sent_acks++;
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
                }
                tipc_link_input(l_ptr, skb);
                skb = NULL;
 unlock:
                tipc_node_unlock(n_ptr);
+               tipc_node_put(n_ptr);
 discard:
                if (unlikely(skb))
                        kfree_skb(skb);
@@ -1271,7 +1199,7 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
                        node->action_flags |= TIPC_NAMED_MSG_EVT;
                return true;
        case MSG_BUNDLER:
-       case CHANGEOVER_PROTOCOL:
+       case TUNNEL_PROTOCOL:
        case MSG_FRAGMENTER:
        case BCAST_PROTOCOL:
                return false;
@@ -1298,8 +1226,14 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
                return;
 
        switch (msg_user(msg)) {
-       case CHANGEOVER_PROTOCOL:
-               if (!tipc_link_tunnel_rcv(node, &skb))
+       case TUNNEL_PROTOCOL:
+               if (msg_dup(msg)) {
+                       link->flags |= LINK_SYNCHING;
+                       link->synch_point = msg_seqno(msg_get_wrapped(msg));
+                       kfree_skb(skb);
+                       break;
+               }
+               if (!tipc_link_failover_rcv(link, &skb))
                        break;
                if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
                        tipc_data_input(link, skb);
@@ -1394,11 +1328,10 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
                return;
        }
 
-       if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
+       if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
                l_ptr->stats.deferred_recv++;
-               TIPC_SKB_CB(buf)->deferred = true;
-               if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+               if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
        } else {
                l_ptr->stats.duplicates++;
        }
@@ -1408,15 +1341,15 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
  * Send protocol message to the other endpoint.
  */
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
-                         u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+                         u32 gap, u32 tolerance, u32 priority)
 {
        struct sk_buff *buf = NULL;
        struct tipc_msg *msg = l_ptr->pmsg;
        u32 msg_size = sizeof(l_ptr->proto_msg);
        int r_flag;
 
-       /* Don't send protocol message during link changeover */
-       if (l_ptr->exp_msg_count)
+       /* Don't send protocol message during link failover */
+       if (l_ptr->flags & LINK_FAILINGOVER)
                return;
 
        /* Abort non-RESET send if communication with node is prohibited */
@@ -1434,11 +1367,11 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
 
                if (!tipc_link_is_up(l_ptr))
                        return;
-               if (l_ptr->next_out)
-                       next_sent = buf_seqno(l_ptr->next_out);
+               if (skb_queue_len(&l_ptr->backlogq))
+                       next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
                msg_set_next_sent(msg, next_sent);
-               if (!skb_queue_empty(&l_ptr->deferred_queue)) {
-                       u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
+               if (!skb_queue_empty(&l_ptr->deferdq)) {
+                       u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
                        gap = mod(rec - mod(l_ptr->next_in_no));
                }
                msg_set_seq_gap(msg, gap);
@@ -1446,35 +1379,20 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
                        l_ptr->stats.sent_nacks++;
                msg_set_link_tolerance(msg, tolerance);
                msg_set_linkprio(msg, priority);
-               msg_set_max_pkt(msg, ack_mtu);
+               msg_set_max_pkt(msg, l_ptr->mtu);
                msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
                msg_set_probe(msg, probe_msg != 0);
-               if (probe_msg) {
-                       u32 mtu = l_ptr->max_pkt;
-
-                       if ((mtu < l_ptr->max_pkt_target) &&
-                           link_working_working(l_ptr) &&
-                           l_ptr->fsm_msg_cnt) {
-                               msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
-                               if (l_ptr->max_pkt_probes == 10) {
-                                       l_ptr->max_pkt_target = (msg_size - 4);
-                                       l_ptr->max_pkt_probes = 0;
-                                       msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
-                               }
-                               l_ptr->max_pkt_probes++;
-                       }
-
+               if (probe_msg)
                        l_ptr->stats.sent_probes++;
-               }
                l_ptr->stats.sent_states++;
        } else {                /* RESET_MSG or ACTIVATE_MSG */
-               msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
+               msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
                msg_set_seq_gap(msg, 0);
                msg_set_next_sent(msg, 1);
                msg_set_probe(msg, 0);
                msg_set_link_tolerance(msg, l_ptr->tolerance);
                msg_set_linkprio(msg, l_ptr->priority);
-               msg_set_max_pkt(msg, l_ptr->max_pkt_target);
+               msg_set_max_pkt(msg, l_ptr->advertised_mtu);
        }
 
        r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
@@ -1490,10 +1408,9 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
 
        skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
        buf->priority = TC_PRIO_CONTROL;
-
        tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
                         &l_ptr->media_addr);
-       l_ptr->unacked_window = 0;
+       l_ptr->rcv_unacked = 0;
        kfree_skb(buf);
 }
 
@@ -1506,13 +1423,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
                                struct sk_buff *buf)
 {
        u32 rec_gap = 0;
-       u32 max_pkt_info;
-       u32 max_pkt_ack;
        u32 msg_tol;
        struct tipc_msg *msg = buf_msg(buf);
 
-       /* Discard protocol message during link changeover */
-       if (l_ptr->exp_msg_count)
+       if (l_ptr->flags & LINK_FAILINGOVER)
                goto exit;
 
        if (l_ptr->net_plane != msg_net_plane(msg))
@@ -1551,15 +1465,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
                if (msg_linkprio(msg) > l_ptr->priority)
                        l_ptr->priority = msg_linkprio(msg);
 
-               max_pkt_info = msg_max_pkt(msg);
-               if (max_pkt_info) {
-                       if (max_pkt_info < l_ptr->max_pkt_target)
-                               l_ptr->max_pkt_target = max_pkt_info;
-                       if (l_ptr->max_pkt > l_ptr->max_pkt_target)
-                               l_ptr->max_pkt = l_ptr->max_pkt_target;
-               } else {
-                       l_ptr->max_pkt = l_ptr->max_pkt_target;
-               }
+               if (l_ptr->mtu > msg_max_pkt(msg))
+                       l_ptr->mtu = msg_max_pkt(msg);
 
                /* Synchronize broadcast link info, if not done previously */
                if (!tipc_node_is_up(l_ptr->owner)) {
@@ -1604,18 +1511,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
                                      mod(l_ptr->next_in_no));
                }
 
-               max_pkt_ack = msg_max_pkt(msg);
-               if (max_pkt_ack > l_ptr->max_pkt) {
-                       l_ptr->max_pkt = max_pkt_ack;
-                       l_ptr->max_pkt_probes = 0;
-               }
-
-               max_pkt_ack = 0;
-               if (msg_probe(msg)) {
+               if (msg_probe(msg))
                        l_ptr->stats.recv_probes++;
-                       if (msg_size(msg) > sizeof(l_ptr->proto_msg))
-                               max_pkt_ack = msg_size(msg);
-               }
 
                /* Protocol message before retransmits, reduce loss risk */
                if (l_ptr->owner->bclink.recv_permitted)
@@ -1623,12 +1520,12 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
                                                      msg_last_bcast(msg));
 
                if (rec_gap || (msg_probe(msg))) {
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
-                                            0, max_pkt_ack);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
+                                            rec_gap, 0, 0);
                }
                if (msg_seq_gap(msg)) {
                        l_ptr->stats.recv_nacks++;
-                       tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue),
+                       tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
                                             msg_seq_gap(msg));
                }
                break;
@@ -1675,7 +1572,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
  */
 void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
 {
-       u32 msgcount = skb_queue_len(&l_ptr->outqueue);
+       int msgcount;
        struct tipc_link *tunnel = l_ptr->owner->active_links[0];
        struct tipc_msg tunnel_hdr;
        struct sk_buff *skb;
@@ -1684,12 +1581,15 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
        if (!tunnel)
                return;
 
-       tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
-                     ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
+       tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
+                     FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
+       skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
+       tipc_link_purge_backlog(l_ptr);
+       msgcount = skb_queue_len(&l_ptr->transmq);
        msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
        msg_set_msgcnt(&tunnel_hdr, msgcount);
 
-       if (skb_queue_empty(&l_ptr->outqueue)) {
+       if (skb_queue_empty(&l_ptr->transmq)) {
                skb = tipc_buf_acquire(INT_H_SIZE);
                if (skb) {
                        skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
@@ -1705,7 +1605,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
        split_bundles = (l_ptr->owner->active_links[0] !=
                         l_ptr->owner->active_links[1]);
 
-       skb_queue_walk(&l_ptr->outqueue, skb) {
+       skb_queue_walk(&l_ptr->transmq, skb) {
                struct tipc_msg *msg = buf_msg(skb);
 
                if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
@@ -1736,157 +1636,105 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
  * and sequence order is preserved per sender/receiver socket pair.
  * Owner node is locked.
  */
-void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
-                             struct tipc_link *tunnel)
+void tipc_link_dup_queue_xmit(struct tipc_link *link,
+                             struct tipc_link *tnl)
 {
        struct sk_buff *skb;
-       struct tipc_msg tunnel_hdr;
-
-       tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
-                     DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
-       msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
-       msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
-       skb_queue_walk(&l_ptr->outqueue, skb) {
+       struct tipc_msg tnl_hdr;
+       struct sk_buff_head *queue = &link->transmq;
+       int mcnt;
+
+       tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
+                     SYNCH_MSG, INT_H_SIZE, link->addr);
+       mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
+       msg_set_msgcnt(&tnl_hdr, mcnt);
+       msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
+
+tunnel_queue:
+       skb_queue_walk(queue, skb) {
                struct sk_buff *outskb;
                struct tipc_msg *msg = buf_msg(skb);
-               u32 length = msg_size(msg);
+               u32 len = msg_size(msg);
 
-               if (msg_user(msg) == MSG_BUNDLER)
-                       msg_set_type(msg, CLOSED_MSG);
-               msg_set_ack(msg, mod(l_ptr->next_in_no - 1));   /* Update */
-               msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-               msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
-               outskb = tipc_buf_acquire(length + INT_H_SIZE);
+               msg_set_ack(msg, mod(link->next_in_no - 1));
+               msg_set_bcast_ack(msg, link->owner->bclink.last_in);
+               msg_set_size(&tnl_hdr, len + INT_H_SIZE);
+               outskb = tipc_buf_acquire(len + INT_H_SIZE);
                if (outskb == NULL) {
                        pr_warn("%sunable to send duplicate msg\n",
                                link_co_err);
                        return;
                }
-               skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
-               skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
-                                              length);
-               __tipc_link_xmit_skb(tunnel, outskb);
-               if (!tipc_link_is_up(l_ptr))
+               skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
+               skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
+                                              skb->data, len);
+               __tipc_link_xmit_skb(tnl, outskb);
+               if (!tipc_link_is_up(link))
                        return;
        }
-}
-
-/**
- * buf_extract - extracts embedded TIPC message from another message
- * @skb: encapsulating message buffer
- * @from_pos: offset to extract from
- *
- * Returns a new message buffer containing an embedded message.  The
- * encapsulating buffer is left unchanged.
- */
-static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
-{
-       struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
-       u32 size = msg_size(msg);
-       struct sk_buff *eb;
-
-       eb = tipc_buf_acquire(size);
-       if (eb)
-               skb_copy_to_linear_data(eb, msg, size);
-       return eb;
-}
-
-/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
- * Owner node is locked.
- */
-static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
-                             struct sk_buff *t_buf)
-{
-       struct sk_buff *buf;
-
-       if (!tipc_link_is_up(l_ptr))
+       if (queue == &link->backlogq)
                return;
-
-       buf = buf_extract(t_buf, INT_H_SIZE);
-       if (buf == NULL) {
-               pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
-               return;
-       }
-
-       /* Add buffer to deferred queue, if applicable: */
-       link_handle_out_of_seq_msg(l_ptr, buf);
+       queue = &link->backlogq;
+       goto tunnel_queue;
 }
 
-/*  tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
+/*  tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
  *  Owner node is locked.
  */
-static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
-                                             struct sk_buff *t_buf)
+static bool tipc_link_failover_rcv(struct tipc_link *link,
+                                  struct sk_buff **skb)
 {
-       struct tipc_msg *t_msg = buf_msg(t_buf);
-       struct sk_buff *buf = NULL;
-       struct tipc_msg *msg;
-
-       if (tipc_link_is_up(l_ptr))
-               tipc_link_reset(l_ptr);
-
-       /* First failover packet? */
-       if (l_ptr->exp_msg_count == START_CHANGEOVER)
-               l_ptr->exp_msg_count = msg_msgcnt(t_msg);
-
-       /* Should there be an inner packet? */
-       if (l_ptr->exp_msg_count) {
-               l_ptr->exp_msg_count--;
-               buf = buf_extract(t_buf, INT_H_SIZE);
-               if (buf == NULL) {
-                       pr_warn("%sno inner failover pkt\n", link_co_err);
-                       goto exit;
-               }
-               msg = buf_msg(buf);
+       struct tipc_msg *msg = buf_msg(*skb);
+       struct sk_buff *iskb = NULL;
+       struct tipc_link *pl = NULL;
+       int bearer_id = msg_bearer_id(msg);
+       int pos = 0;
 
-               if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
-                       kfree_skb(buf);
-                       buf = NULL;
-                       goto exit;
-               }
-               if (msg_user(msg) == MSG_FRAGMENTER) {
-                       l_ptr->stats.recv_fragments++;
-                       tipc_buf_append(&l_ptr->reasm_buf, &buf);
-               }
+       if (msg_type(msg) != FAILOVER_MSG) {
+               pr_warn("%sunknown tunnel pkt received\n", link_co_err);
+               goto exit;
        }
-exit:
-       if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
-               tipc_link_delete(l_ptr);
-       return buf;
-}
+       if (bearer_id >= MAX_BEARERS)
+               goto exit;
 
-/*  tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
- *  via other link as result of a failover (ORIGINAL_MSG) or
- *  a new active link (DUPLICATE_MSG). Failover packets are
- *  returned to the active link for delivery upwards.
- *  Owner node is locked.
- */
-static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
-                               struct sk_buff **buf)
-{
-       struct sk_buff *t_buf = *buf;
-       struct tipc_link *l_ptr;
-       struct tipc_msg *t_msg = buf_msg(t_buf);
-       u32 bearer_id = msg_bearer_id(t_msg);
+       if (bearer_id == link->bearer_id)
+               goto exit;
 
-       *buf = NULL;
+       pl = link->owner->links[bearer_id];
+       if (pl && tipc_link_is_up(pl))
+               tipc_link_reset(pl);
 
-       if (bearer_id >= MAX_BEARERS)
+       if (link->failover_pkts == FIRST_FAILOVER)
+               link->failover_pkts = msg_msgcnt(msg);
+
+       /* Should we expect an inner packet? */
+       if (!link->failover_pkts)
                goto exit;
 
-       l_ptr = n_ptr->links[bearer_id];
-       if (!l_ptr)
+       if (!tipc_msg_extract(*skb, &iskb, &pos)) {
+               pr_warn("%sno inner failover pkt\n", link_co_err);
+               *skb = NULL;
                goto exit;
+       }
+       link->failover_pkts--;
+       *skb = NULL;
 
-       if (msg_type(t_msg) == DUPLICATE_MSG)
-               tipc_link_dup_rcv(l_ptr, t_buf);
-       else if (msg_type(t_msg) == ORIGINAL_MSG)
-               *buf = tipc_link_failover_rcv(l_ptr, t_buf);
-       else
-               pr_warn("%sunknown tunnel pkt received\n", link_co_err);
+       /* Was this packet already delivered? */
+       if (less(buf_seqno(iskb), link->failover_checkpt)) {
+               kfree_skb(iskb);
+               iskb = NULL;
+               goto exit;
+       }
+       if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
+               link->stats.recv_fragments++;
+               tipc_buf_append(&link->failover_skb, &iskb);
+       }
 exit:
-       kfree_skb(t_buf);
-       return *buf != NULL;
+       if (!link->failover_pkts && pl)
+               pl->flags &= ~LINK_FAILINGOVER;
+       kfree_skb(*skb);
+       *skb = iskb;
+       return *skb;
 }
 
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
@@ -1901,23 +1749,16 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
        l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
 }
 
-void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
+void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
 {
-       /* Data messages from this node, inclusive FIRST_FRAGM */
-       l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
-       l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
-       l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
-       l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
-       /* Transiting data messages,inclusive FIRST_FRAGM */
-       l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
-       l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
-       l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
-       l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
-       l_ptr->queue_limit[CONN_MANAGER] = 1200;
-       l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
-       l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
-       /* FRAGMENT and LAST_FRAGMENT packets */
-       l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
+       int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
+
+       l->window = win;
+       l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
+       l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
+       l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
+       l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
+       l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
 }
 
 /* tipc_link_find_owner - locate owner node of link by link's name
@@ -2082,14 +1923,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 
                        tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
                        link_set_supervision_props(link, tol);
-                       tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
+                       tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
                }
                if (props[TIPC_NLA_PROP_PRIO]) {
                        u32 prio;
 
                        prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
                        link->priority = prio;
-                       tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
+                       tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
                }
                if (props[TIPC_NLA_PROP_WIN]) {
                        u32 win;
@@ -2194,7 +2035,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
        if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
                        tipc_cluster_mask(tn->own_addr)))
                goto attr_msg_full;
-       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
+       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
                goto attr_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
                goto attr_msg_full;
@@ -2216,7 +2057,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
        if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
                goto prop_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
-                       link->queue_limit[TIPC_LOW_IMPORTANCE]))
+                       link->window))
                goto prop_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
                goto prop_msg_full;
@@ -2282,7 +2123,6 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
        msg.seq = cb->nlh->nlmsg_seq;
 
        rcu_read_lock();
-
        if (prev_node) {
                node = tipc_node_find(net, prev_node);
                if (!node) {
@@ -2295,6 +2135,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        cb->prev_seq = 1;
                        goto out;
                }
+               tipc_node_put(node);
 
                list_for_each_entry_continue_rcu(node, &tn->node_list,
                                                 list) {
@@ -2302,6 +2143,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        err = __tipc_nl_add_node_links(net, &msg, node,
                                                       &prev_link);
                        tipc_node_unlock(node);
+                       tipc_node_put(node);
                        if (err)
                                goto out;
 
index 7aeb52092bf35ffdead0a65c0516785c17fc5a77..b5b4e3554d4e896873eba6c58ccb3ec48f712025 100644 (file)
 
 /* Link endpoint execution states
  */
-#define LINK_STARTED    0x0001
-#define LINK_STOPPED    0x0002
+#define LINK_STARTED     0x0001
+#define LINK_STOPPED     0x0002
+#define LINK_SYNCHING    0x0004
+#define LINK_FAILINGOVER 0x0008
 
 /* Starting value for maximum packet size negotiation on unicast links
  * (unless bearer MTU is less)
@@ -118,13 +120,13 @@ struct tipc_stats {
  * @pmsg: convenience pointer to "proto_msg" field
  * @priority: current link priority
  * @net_plane: current link network plane ('A' through 'H')
- * @queue_limit: outbound message queue congestion thresholds (indexed by user)
+ * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
  * @exp_msg_count: # of tunnelled messages expected during link changeover
  * @reset_checkpoint: seq # of last acknowledged message at time of link reset
- * @max_pkt: current maximum packet size for this link
- * @max_pkt_target: desired maximum packet size for this link
- * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
- * @outqueue: outbound message queue
+ * @mtu: current maximum packet size for this link
+ * @advertised_mtu: advertised own mtu when link is being established
+ * @transmitq: queue for sent, non-acked messages
+ * @backlogq: queue for messages waiting to be sent
  * @next_out_no: next sequence number to use for outbound messages
  * @last_retransmitted: sequence number of most recently retransmitted message
  * @stale_count: # of identical retransmit requests made by peer
@@ -165,36 +167,40 @@ struct tipc_link {
        struct tipc_msg *pmsg;
        u32 priority;
        char net_plane;
-       u32 queue_limit[15];    /* queue_limit[0]==window limit */
+       u16 synch_point;
 
-       /* Changeover */
-       u32 exp_msg_count;
-       u32 reset_checkpoint;
+       /* Failover */
+       u16 failover_pkts;
+       u16 failover_checkpt;
+       struct sk_buff *failover_skb;
 
        /* Max packet negotiation */
-       u32 max_pkt;
-       u32 max_pkt_target;
-       u32 max_pkt_probes;
+       u16 mtu;
+       u16 advertised_mtu;
 
        /* Sending */
-       struct sk_buff_head outqueue;
+       struct sk_buff_head transmq;
+       struct sk_buff_head backlogq;
+       struct {
+               u16 len;
+               u16 limit;
+       } backlog[5];
        u32 next_out_no;
+       u32 window;
        u32 last_retransmitted;
        u32 stale_count;
 
        /* Reception */
        u32 next_in_no;
-       struct sk_buff_head deferred_queue;
-       u32 unacked_window;
+       u32 rcv_unacked;
+       struct sk_buff_head deferdq;
        struct sk_buff_head inputq;
        struct sk_buff_head namedq;
 
        /* Congestion handling */
-       struct sk_buff *next_out;
        struct sk_buff_head wakeupq;
 
        /* Fragmentation/reassembly */
-       u32 long_msg_seq_no;
        struct sk_buff *reasm_buf;
 
        /* Statistics */
@@ -225,7 +231,7 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
                     struct sk_buff_head *list);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
-                         u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
+                         u32 gap, u32 tolerance, u32 priority);
 void tipc_link_push_packets(struct tipc_link *l_ptr);
 u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf);
 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
@@ -302,9 +308,4 @@ static inline int link_reset_reset(struct tipc_link *l_ptr)
        return l_ptr->state == RESET_RESET;
 }
 
-static inline int link_congested(struct tipc_link *l_ptr)
-{
-       return skb_queue_len(&l_ptr->outqueue) >= l_ptr->queue_limit[0];
-}
-
 #endif
index b6eb90cd3ef7053ffe8d73143a53b8021e2770d3..c3e96e8154188af27c0d5fc545fe95c54a29e9e9 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/msg.c: TIPC message header routines
  *
- * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -165,6 +165,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
        }
 
        if (fragid == LAST_FRAGMENT) {
+               TIPC_SKB_CB(head)->validated = false;
+               if (unlikely(!tipc_msg_validate(head)))
+                       goto err;
                *buf = head;
                TIPC_SKB_CB(head)->tail = NULL;
                *headbuf = NULL;
@@ -172,7 +175,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
        }
        *buf = NULL;
        return 0;
-
 err:
        pr_warn_ratelimited("Unable to build fragment list\n");
        kfree_skb(*buf);
@@ -181,6 +183,48 @@ err:
        return 0;
 }
 
+/* tipc_msg_validate - validate basic format of received message
+ *
+ * This routine ensures a TIPC message has an acceptable header, and at least
+ * as much data as the header indicates it should.  The routine also ensures
+ * that the entire message header is stored in the main fragment of the message
+ * buffer, to simplify future access to message header fields.
+ *
+ * Note: Having extra info present in the message header or data areas is OK.
+ * TIPC will ignore the excess, under the assumption that it is optional info
+ * introduced by a later release of the protocol.
+ */
+bool tipc_msg_validate(struct sk_buff *skb)
+{
+       struct tipc_msg *msg;
+       int msz, hsz;
+
+       if (unlikely(TIPC_SKB_CB(skb)->validated))
+               return true;
+       if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
+               return false;
+
+       hsz = msg_hdr_sz(buf_msg(skb));
+       if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
+               return false;
+       if (unlikely(!pskb_may_pull(skb, hsz)))
+               return false;
+
+       msg = buf_msg(skb);
+       if (unlikely(msg_version(msg) != TIPC_VERSION))
+               return false;
+
+       msz = msg_size(msg);
+       if (unlikely(msz < hsz))
+               return false;
+       if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
+               return false;
+       if (unlikely(skb->len < msz))
+               return false;
+
+       TIPC_SKB_CB(skb)->validated = true;
+       return true;
+}
 
 /**
  * tipc_msg_build - create buffer chain containing specified header and data
@@ -228,6 +272,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
                      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
        msg_set_size(&pkthdr, pktmax);
        msg_set_fragm_no(&pkthdr, pktno);
+       msg_set_importance(&pkthdr, msg_importance(mhdr));
 
        /* Prepare first fragment */
        skb = tipc_buf_acquire(pktmax);
@@ -286,33 +331,36 @@ error:
 
 /**
  * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @list: the buffer chain of the existing buffer ("bundle")
+ * @bskb: the buffer to append to ("bundle")
  * @skb:  buffer to be appended
  * @mtu:  max allowable size for the bundle buffer
  * Consumes buffer if successful
  * Returns true if bundling could be performed, otherwise false
  */
-bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
+bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
 {
-       struct sk_buff *bskb = skb_peek_tail(list);
-       struct tipc_msg *bmsg = buf_msg(bskb);
+       struct tipc_msg *bmsg;
        struct tipc_msg *msg = buf_msg(skb);
-       unsigned int bsz = msg_size(bmsg);
+       unsigned int bsz;
        unsigned int msz = msg_size(msg);
-       u32 start = align(bsz);
+       u32 start, pad;
        u32 max = mtu - INT_H_SIZE;
-       u32 pad = start - bsz;
 
        if (likely(msg_user(msg) == MSG_FRAGMENTER))
                return false;
-       if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
+       if (!bskb)
+               return false;
+       bmsg = buf_msg(bskb);
+       bsz = msg_size(bmsg);
+       start = align(bsz);
+       pad = start - bsz;
+
+       if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
                return false;
        if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
                return false;
        if (likely(msg_user(bmsg) != MSG_BUNDLER))
                return false;
-       if (likely(!TIPC_SKB_CB(bskb)->bundling))
-               return false;
        if (unlikely(skb_tailroom(bskb) < (pad + msz)))
                return false;
        if (unlikely(max < (start + msz)))
@@ -328,34 +376,40 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
 
 /**
  *  tipc_msg_extract(): extract bundled inner packet from buffer
- *  @skb: linear outer buffer, to be extracted from.
+ *  @skb: buffer to be extracted from.
  *  @iskb: extracted inner buffer, to be returned
- *  @pos: position of msg to be extracted. Returns with pointer of next msg
+ *  @pos: position in outer message of msg to be extracted.
+ *        Returns position of next msg
  *  Consumes outer buffer when last packet extracted
  *  Returns true when when there is an extracted buffer, otherwise false
  */
 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
 {
-       struct tipc_msg *msg = buf_msg(skb);
-       int imsz;
-       struct tipc_msg *imsg = (struct tipc_msg *)(msg_data(msg) + *pos);
+       struct tipc_msg *msg;
+       int imsz, offset;
 
-       /* Is there space left for shortest possible message? */
-       if (*pos > (msg_data_sz(msg) - SHORT_H_SIZE))
+       *iskb = NULL;
+       if (unlikely(skb_linearize(skb)))
+               goto none;
+
+       msg = buf_msg(skb);
+       offset = msg_hdr_sz(msg) + *pos;
+       if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE)))
                goto none;
-       imsz = msg_size(imsg);
 
-       /* Is there space left for current message ? */
-       if ((*pos + imsz) > msg_data_sz(msg))
+       *iskb = skb_clone(skb, GFP_ATOMIC);
+       if (unlikely(!*iskb))
                goto none;
-       *iskb = tipc_buf_acquire(imsz);
-       if (!*iskb)
+       skb_pull(*iskb, offset);
+       imsz = msg_size(buf_msg(*iskb));
+       skb_trim(*iskb, imsz);
+       if (unlikely(!tipc_msg_validate(*iskb)))
                goto none;
-       skb_copy_to_linear_data(*iskb, imsg, imsz);
        *pos += align(imsz);
        return true;
 none:
        kfree_skb(skb);
+       kfree_skb(*iskb);
        *iskb = NULL;
        return false;
 }
@@ -369,18 +423,17 @@ none:
  * Replaces buffer if successful
  * Returns true if success, otherwise false
  */
-bool tipc_msg_make_bundle(struct sk_buff_head *list,
-                         struct sk_buff *skb, u32 mtu, u32 dnode)
+bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
 {
        struct sk_buff *bskb;
        struct tipc_msg *bmsg;
-       struct tipc_msg *msg = buf_msg(skb);
+       struct tipc_msg *msg = buf_msg(*skb);
        u32 msz = msg_size(msg);
        u32 max = mtu - INT_H_SIZE;
 
        if (msg_user(msg) == MSG_FRAGMENTER)
                return false;
-       if (msg_user(msg) == CHANGEOVER_PROTOCOL)
+       if (msg_user(msg) == TUNNEL_PROTOCOL)
                return false;
        if (msg_user(msg) == BCAST_PROTOCOL)
                return false;
@@ -398,9 +451,9 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list,
        msg_set_seqno(bmsg, msg_seqno(msg));
        msg_set_ack(bmsg, msg_ack(msg));
        msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
-       TIPC_SKB_CB(bskb)->bundling = true;
-       __skb_queue_tail(list, bskb);
-       return tipc_msg_bundle(list, skb, mtu);
+       tipc_msg_bundle(bskb, *skb, mtu);
+       *skb = bskb;
+       return true;
 }
 
 /**
@@ -415,21 +468,17 @@ bool tipc_msg_reverse(u32 own_addr,  struct sk_buff *buf, u32 *dnode,
                      int err)
 {
        struct tipc_msg *msg = buf_msg(buf);
-       uint imp = msg_importance(msg);
        struct tipc_msg ohdr;
        uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
 
        if (skb_linearize(buf))
                goto exit;
+       msg = buf_msg(buf);
        if (msg_dest_droppable(msg))
                goto exit;
        if (msg_errcode(msg))
                goto exit;
-
        memcpy(&ohdr, msg, msg_hdr_sz(msg));
-       imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
-       if (msg_isdata(msg))
-               msg_set_importance(msg, imp);
        msg_set_errcode(msg, err);
        msg_set_origport(msg, msg_destport(&ohdr));
        msg_set_destport(msg, msg_origport(&ohdr));
@@ -462,15 +511,18 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
 {
        struct tipc_msg *msg = buf_msg(skb);
        u32 dport;
+       u32 own_addr = tipc_own_addr(net);
 
        if (!msg_isdata(msg))
                return false;
        if (!msg_named(msg))
                return false;
+       if (msg_errcode(msg))
+               return false;
        *err = -TIPC_ERR_NO_NAME;
        if (skb_linearize(skb))
                return false;
-       if (msg_reroute_cnt(msg) > 0)
+       if (msg_reroute_cnt(msg))
                return false;
        *dnode = addr_domain(net, msg_lookup_scope(msg));
        dport = tipc_nametbl_translate(net, msg_nametype(msg),
@@ -478,6 +530,8 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
        if (!dport)
                return false;
        msg_incr_reroute_cnt(msg);
+       if (*dnode != own_addr)
+               msg_set_prevnode(msg, own_addr);
        msg_set_destnode(msg, *dnode);
        msg_set_destport(msg, dport);
        *err = TIPC_OK;
index 9ace47f44a69ed3000c6739aee55fbf30b04081c..e1d3595e2ee9577634b2bf5b215bd96f43ca473b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/msg.h: Include file for TIPC message header routines
  *
- * Copyright (c) 2000-2007, 2014, Ericsson AB
+ * Copyright (c) 2000-2007, 2014-2015 Ericsson AB
  * Copyright (c) 2005-2008, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -54,6 +54,8 @@ struct plist;
  * - TIPC_HIGH_IMPORTANCE
  * - TIPC_CRITICAL_IMPORTANCE
  */
+#define TIPC_SYSTEM_IMPORTANCE 4
+
 
 /*
  * Payload message types
@@ -63,6 +65,19 @@ struct plist;
 #define TIPC_NAMED_MSG         2
 #define TIPC_DIRECT_MSG                3
 
+/*
+ * Internal message users
+ */
+#define  BCAST_PROTOCOL       5
+#define  MSG_BUNDLER          6
+#define  LINK_PROTOCOL        7
+#define  CONN_MANAGER         8
+#define  TUNNEL_PROTOCOL      10
+#define  NAME_DISTRIBUTOR     11
+#define  MSG_FRAGMENTER       12
+#define  LINK_CONFIG          13
+#define  SOCK_WAKEUP          14       /* pseudo user */
+
 /*
  * Message header sizes
  */
@@ -76,7 +91,7 @@ struct plist;
 
 #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
 
-#define TIPC_MEDIA_ADDR_OFFSET 5
+#define TIPC_MEDIA_INFO_OFFSET 5
 
 /**
  * TIPC message buffer code
@@ -87,12 +102,12 @@ struct plist;
  * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
  *       are word aligned for quicker access
  */
-#define BUF_HEADROOM LL_MAX_HEADER
+#define BUF_HEADROOM (LL_MAX_HEADER + 48)
 
 struct tipc_skb_cb {
        void *handle;
        struct sk_buff *tail;
-       bool deferred;
+       bool validated;
        bool wakeup_pending;
        bool bundling;
        u16 chain_sz;
@@ -170,16 +185,6 @@ static inline void msg_set_user(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 0, 25, 0xf, n);
 }
 
-static inline u32 msg_importance(struct tipc_msg *m)
-{
-       return msg_bits(m, 0, 25, 0xf);
-}
-
-static inline void msg_set_importance(struct tipc_msg *m, u32 i)
-{
-       msg_set_user(m, i);
-}
-
 static inline u32 msg_hdr_sz(struct tipc_msg *m)
 {
        return msg_bits(m, 0, 21, 0xf) << 2;
@@ -235,6 +240,15 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz)
        m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz);
 }
 
+static inline unchar *msg_data(struct tipc_msg *m)
+{
+       return ((unchar *)m) + msg_hdr_sz(m);
+}
+
+static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
+{
+       return (struct tipc_msg *)msg_data(m);
+}
 
 /*
  * Word 1
@@ -336,6 +350,25 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
 /*
  * Words 3-10
  */
+static inline u32 msg_importance(struct tipc_msg *m)
+{
+       if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+               return msg_bits(m, 5, 13, 0x7);
+       if (likely(msg_isdata(m) && !msg_errcode(m)))
+               return msg_user(m);
+       return TIPC_SYSTEM_IMPORTANCE;
+}
+
+static inline void msg_set_importance(struct tipc_msg *m, u32 i)
+{
+       if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+               msg_set_bits(m, 5, 13, 0x7, i);
+       else if (likely(i < TIPC_SYSTEM_IMPORTANCE))
+               msg_set_user(m, i);
+       else
+               pr_warn("Trying to set illegal importance in message\n");
+}
+
 static inline u32 msg_prevnode(struct tipc_msg *m)
 {
        return msg_word(m, 3);
@@ -348,6 +381,8 @@ static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
 
 static inline u32 msg_origport(struct tipc_msg *m)
 {
+       if (msg_user(m) == MSG_FRAGMENTER)
+               m = msg_get_wrapped(m);
        return msg_word(m, 4);
 }
 
@@ -443,34 +478,10 @@ static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
        msg_set_word(m, 10, n);
 }
 
-static inline unchar *msg_data(struct tipc_msg *m)
-{
-       return ((unchar *)m) + msg_hdr_sz(m);
-}
-
-static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
-{
-       return (struct tipc_msg *)msg_data(m);
-}
-
 /*
  * Constants and routines used to read and write TIPC internal message headers
  */
 
-/*
- * Internal message users
- */
-#define  BCAST_PROTOCOL       5
-#define  MSG_BUNDLER          6
-#define  LINK_PROTOCOL        7
-#define  CONN_MANAGER         8
-#define  ROUTE_DISTRIBUTOR    9                /* obsoleted */
-#define  CHANGEOVER_PROTOCOL  10
-#define  NAME_DISTRIBUTOR     11
-#define  MSG_FRAGMENTER       12
-#define  LINK_CONFIG          13
-#define  SOCK_WAKEUP          14       /* pseudo user */
-
 /*
  *  Connection management protocol message types
  */
@@ -501,8 +512,8 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
 /*
  * Changeover tunnel message types
  */
-#define DUPLICATE_MSG          0
-#define ORIGINAL_MSG           1
+#define SYNCH_MSG              0
+#define FAILOVER_MSG           1
 
 /*
  * Config protocol message types
@@ -510,7 +521,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
 #define DSC_REQ_MSG            0
 #define DSC_RESP_MSG           1
 
-
 /*
  * Word 1
  */
@@ -534,6 +544,24 @@ static inline void msg_set_node_sig(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 1, 0, 0xffff, n);
 }
 
+static inline u32 msg_node_capabilities(struct tipc_msg *m)
+{
+       return msg_bits(m, 1, 15, 0x1fff);
+}
+
+static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 1, 15, 0x1fff, n);
+}
+
+static inline bool msg_dup(struct tipc_msg *m)
+{
+       if (likely(msg_user(m) != TUNNEL_PROTOCOL))
+               return false;
+       if (msg_type(m) != SYNCH_MSG)
+               return false;
+       return true;
+}
 
 /*
  * Word 2
@@ -688,7 +716,7 @@ static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r)
 
 static inline char *msg_media_addr(struct tipc_msg *m)
 {
-       return (char *)&m->hdr[TIPC_MEDIA_ADDR_OFFSET];
+       return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
 }
 
 /*
@@ -734,21 +762,8 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 9, 0, 0xffff, n);
 }
 
-static inline u32 tipc_msg_tot_importance(struct tipc_msg *m)
-{
-       if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT))
-               return msg_importance(msg_get_wrapped(m));
-       return msg_importance(m);
-}
-
-static inline u32 msg_tot_origport(struct tipc_msg *m)
-{
-       if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT))
-               return msg_origport(msg_get_wrapped(m));
-       return msg_origport(m);
-}
-
 struct sk_buff *tipc_buf_acquire(u32 size);
+bool tipc_msg_validate(struct sk_buff *skb);
 bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
                      int err);
 void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
@@ -757,9 +772,9 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
                                uint data_sz, u32 dnode, u32 onode,
                                u32 dport, u32 oport, int errcode);
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
-bool tipc_msg_make_bundle(struct sk_buff_head *list,
-                         struct sk_buff *skb, u32 mtu, u32 dnode);
+bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu);
+
+bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode);
 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
                   int offset, int dsz, int mtu, struct sk_buff_head *list);
index fcb07915aaacc244bedc54f660b774c2550b85a5..41e7b7e4dda0818469c17ff8b6e48aa1654a23ff 100644 (file)
@@ -98,7 +98,7 @@ void named_cluster_distribute(struct net *net, struct sk_buff *skb)
                        continue;
                if (!tipc_node_active_links(node))
                        continue;
-               oskb = skb_copy(skb, GFP_ATOMIC);
+               oskb = pskb_copy(skb, GFP_ATOMIC);
                if (!oskb)
                        break;
                msg_set_destnode(buf_msg(oskb), dnode);
@@ -244,6 +244,7 @@ static void tipc_publ_subscribe(struct net *net, struct publication *publ,
        tipc_node_lock(node);
        list_add_tail(&publ->nodesub_list, &node->publ_list);
        tipc_node_unlock(node);
+       tipc_node_put(node);
 }
 
 static void tipc_publ_unsubscribe(struct net *net, struct publication *publ,
@@ -258,6 +259,7 @@ static void tipc_publ_unsubscribe(struct net *net, struct publication *publ,
        tipc_node_lock(node);
        list_del_init(&publ->nodesub_list);
        tipc_node_unlock(node);
+       tipc_node_put(node);
 }
 
 /**
index 105ba7adf06f1dcc6be667b1de261ce3ad010506..ab0ac62a12879b068ef4d34aa360bf8839676b2c 100644 (file)
@@ -811,8 +811,8 @@ static void tipc_purge_publications(struct net *net, struct name_seq *seq)
        sseq = seq->sseqs;
        info = sseq->info;
        list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
-               tipc_nametbl_remove_publ(net, publ->type, publ->lower,
-                                        publ->node, publ->ref, publ->key);
+               tipc_nameseq_remove_publ(net, seq, publ->lower, publ->node,
+                                        publ->ref, publ->key);
                kfree_rcu(publ, rcu);
        }
        hlist_del_init_rcu(&seq->ns_list);
index 86152de8248da7164cde4b932ac7be1b6b6b2245..22c059ad29991abbdc40e3eca4a09de78df2c1d0 100644 (file)
@@ -42,6 +42,7 @@
 
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
+static void tipc_node_delete(struct tipc_node *node);
 
 struct tipc_sock_conn {
        u32 port;
@@ -67,6 +68,23 @@ static unsigned int tipc_hashfn(u32 addr)
        return addr & (NODE_HTABLE_SIZE - 1);
 }
 
+static void tipc_node_kref_release(struct kref *kref)
+{
+       struct tipc_node *node = container_of(kref, struct tipc_node, kref);
+
+       tipc_node_delete(node);
+}
+
+void tipc_node_put(struct tipc_node *node)
+{
+       kref_put(&node->kref, tipc_node_kref_release);
+}
+
+static void tipc_node_get(struct tipc_node *node)
+{
+       kref_get(&node->kref);
+}
+
 /*
  * tipc_node_find - locate specified node object, if it exists
  */
@@ -82,6 +100,7 @@ struct tipc_node *tipc_node_find(struct net *net, u32 addr)
        hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
                                 hash) {
                if (node->addr == addr) {
+                       tipc_node_get(node);
                        rcu_read_unlock();
                        return node;
                }
@@ -106,12 +125,13 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
        }
        n_ptr->addr = addr;
        n_ptr->net = net;
+       kref_init(&n_ptr->kref);
        spin_lock_init(&n_ptr->lock);
        INIT_HLIST_NODE(&n_ptr->hash);
        INIT_LIST_HEAD(&n_ptr->list);
        INIT_LIST_HEAD(&n_ptr->publ_list);
        INIT_LIST_HEAD(&n_ptr->conn_sks);
-       __skb_queue_head_init(&n_ptr->bclink.deferred_queue);
+       __skb_queue_head_init(&n_ptr->bclink.deferdq);
        hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
        list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                if (n_ptr->addr < temp_node->addr)
@@ -120,16 +140,17 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
        list_add_tail_rcu(&n_ptr->list, &temp_node->list);
        n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
        n_ptr->signature = INVALID_NODE_SIG;
+       tipc_node_get(n_ptr);
 exit:
        spin_unlock_bh(&tn->node_list_lock);
        return n_ptr;
 }
 
-static void tipc_node_delete(struct tipc_net *tn, struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_node *node)
 {
-       list_del_rcu(&n_ptr->list);
-       hlist_del_rcu(&n_ptr->hash);
-       kfree_rcu(n_ptr, rcu);
+       list_del_rcu(&node->list);
+       hlist_del_rcu(&node->hash);
+       kfree_rcu(node, rcu);
 }
 
 void tipc_node_stop(struct net *net)
@@ -139,7 +160,7 @@ void tipc_node_stop(struct net *net)
 
        spin_lock_bh(&tn->node_list_lock);
        list_for_each_entry_safe(node, t_node, &tn->node_list, list)
-               tipc_node_delete(tn, node);
+               tipc_node_put(node);
        spin_unlock_bh(&tn->node_list_lock);
 }
 
@@ -147,6 +168,7 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
 {
        struct tipc_node *node;
        struct tipc_sock_conn *conn;
+       int err = 0;
 
        if (in_own_node(net, dnode))
                return 0;
@@ -157,8 +179,10 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
                return -EHOSTUNREACH;
        }
        conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
-       if (!conn)
-               return -EHOSTUNREACH;
+       if (!conn) {
+               err = -EHOSTUNREACH;
+               goto exit;
+       }
        conn->peer_node = dnode;
        conn->port = port;
        conn->peer_port = peer_port;
@@ -166,7 +190,9 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
        tipc_node_lock(node);
        list_add_tail(&conn->list, &node->conn_sks);
        tipc_node_unlock(node);
-       return 0;
+exit:
+       tipc_node_put(node);
+       return err;
 }
 
 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
@@ -189,6 +215,7 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
                kfree(conn);
        }
        tipc_node_unlock(node);
+       tipc_node_put(node);
 }
 
 /**
@@ -227,8 +254,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
        active[0] = active[1] = l_ptr;
 exit:
        /* Leave room for changeover header when returning 'mtu' to users: */
-       n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
-       n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
+       n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
+       n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
 }
 
 /**
@@ -292,11 +319,10 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 
        /* Leave room for changeover header when returning 'mtu' to users: */
        if (active[0]) {
-               n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
-               n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
+               n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
+               n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
                return;
        }
-
        /* Loopback link went down? No fragmentation needed from now on. */
        if (n_ptr->addr == tn->own_addr) {
                n_ptr->act_mtus[0] = MAX_MSG_SIZE;
@@ -354,7 +380,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 
        /* Flush broadcast link info associated with lost node */
        if (n_ptr->bclink.recv_permitted) {
-               __skb_queue_purge(&n_ptr->bclink.deferred_queue);
+               __skb_queue_purge(&n_ptr->bclink.deferdq);
 
                if (n_ptr->bclink.reasm_buf) {
                        kfree_skb(n_ptr->bclink.reasm_buf);
@@ -367,18 +393,17 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                n_ptr->bclink.recv_permitted = false;
        }
 
-       /* Abort link changeover */
+       /* Abort any ongoing link failover */
        for (i = 0; i < MAX_BEARERS; i++) {
                struct tipc_link *l_ptr = n_ptr->links[i];
                if (!l_ptr)
                        continue;
-               l_ptr->reset_checkpoint = l_ptr->next_in_no;
-               l_ptr->exp_msg_count = 0;
+               l_ptr->flags &= ~LINK_FAILINGOVER;
+               l_ptr->failover_checkpt = 0;
+               l_ptr->failover_pkts = 0;
+               kfree_skb(l_ptr->failover_skb);
+               l_ptr->failover_skb = NULL;
                tipc_link_reset_fragments(l_ptr);
-
-               /* Link marked for deletion after failover? => do it now */
-               if (l_ptr->flags & LINK_STOPPED)
-                       tipc_link_delete(l_ptr);
        }
 
        n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
@@ -417,19 +442,25 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
                           char *linkname, size_t len)
 {
        struct tipc_link *link;
+       int err = -EINVAL;
        struct tipc_node *node = tipc_node_find(net, addr);
 
-       if ((bearer_id >= MAX_BEARERS) || !node)
-               return -EINVAL;
+       if (!node)
+               return err;
+
+       if (bearer_id >= MAX_BEARERS)
+               goto exit;
+
        tipc_node_lock(node);
        link = node->links[bearer_id];
        if (link) {
                strncpy(linkname, link->name, len);
-               tipc_node_unlock(node);
-               return 0;
+               err = 0;
        }
+exit:
        tipc_node_unlock(node);
-       return -EINVAL;
+       tipc_node_put(node);
+       return err;
 }
 
 void tipc_node_unlock(struct tipc_node *node)
@@ -459,7 +490,7 @@ void tipc_node_unlock(struct tipc_node *node)
                                TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
                                TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
                                TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
-                               TIPC_NAMED_MSG_EVT);
+                               TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);
 
        spin_unlock_bh(&node->lock);
 
@@ -488,6 +519,9 @@ void tipc_node_unlock(struct tipc_node *node)
 
        if (flags & TIPC_BCAST_MSG_EVT)
                tipc_bclink_input(net);
+
+       if (flags & TIPC_BCAST_RESET)
+               tipc_link_reset_all(node);
 }
 
 /* Caller should hold node lock for the passed node */
@@ -542,17 +576,21 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
        msg.seq = cb->nlh->nlmsg_seq;
 
        rcu_read_lock();
-
-       if (last_addr && !tipc_node_find(net, last_addr)) {
-               rcu_read_unlock();
-               /* We never set seq or call nl_dump_check_consistent() this
-                * means that setting prev_seq here will cause the consistence
-                * check to fail in the netlink callback handler. Resulting in
-                * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if
-                * the node state changed while we released the lock.
-                */
-               cb->prev_seq = 1;
-               return -EPIPE;
+       if (last_addr) {
+               node = tipc_node_find(net, last_addr);
+               if (!node) {
+                       rcu_read_unlock();
+                       /* We never set seq or call nl_dump_check_consistent()
+                        * this means that setting prev_seq here will cause the
+                        * consistence check to fail in the netlink callback
+                        * handler. Resulting in the NLMSG_DONE message having
+                        * the NLM_F_DUMP_INTR flag set if the node state
+                        * changed while we released the lock.
+                        */
+                       cb->prev_seq = 1;
+                       return -EPIPE;
+               }
+               tipc_node_put(node);
        }
 
        list_for_each_entry_rcu(node, &tn->node_list, list) {
index 3d18c66b7f7895e030e0ab7de705df903638975b..02d5c20dc5511a1669c0262e17d1127a5b2b6a1e 100644 (file)
@@ -64,7 +64,8 @@ enum {
        TIPC_NOTIFY_LINK_UP             = (1 << 6),
        TIPC_NOTIFY_LINK_DOWN           = (1 << 7),
        TIPC_NAMED_MSG_EVT              = (1 << 8),
-       TIPC_BCAST_MSG_EVT              = (1 << 9)
+       TIPC_BCAST_MSG_EVT              = (1 << 9),
+       TIPC_BCAST_RESET                = (1 << 10)
 };
 
 /**
@@ -84,7 +85,7 @@ struct tipc_node_bclink {
        u32 last_sent;
        u32 oos_state;
        u32 deferred_size;
-       struct sk_buff_head deferred_queue;
+       struct sk_buff_head deferdq;
        struct sk_buff *reasm_buf;
        int inputq_map;
        bool recv_permitted;
@@ -93,6 +94,7 @@ struct tipc_node_bclink {
 /**
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
+ * @ref: reference counter to node object
  * @lock: spinlock governing access to structure
  * @net: the applicable net namespace
  * @hash: links to adjacent nodes in unsorted hash chain
@@ -106,6 +108,7 @@ struct tipc_node_bclink {
  * @list: links to adjacent nodes in sorted list of cluster's nodes
  * @working_links: number of working links to node (both active and standby)
  * @link_cnt: number of links to node
+ * @capabilities: bitmap, indicating peer node's functional capabilities
  * @signature: node instance identifier
  * @link_id: local and remote bearer ids of changing link, if any
  * @publ_list: list of publications
@@ -113,6 +116,7 @@ struct tipc_node_bclink {
  */
 struct tipc_node {
        u32 addr;
+       struct kref kref;
        spinlock_t lock;
        struct net *net;
        struct hlist_node hash;
@@ -125,7 +129,8 @@ struct tipc_node {
        struct tipc_node_bclink bclink;
        struct list_head list;
        int link_cnt;
-       int working_links;
+       u16 working_links;
+       u16 capabilities;
        u32 signature;
        u32 link_id;
        struct list_head publ_list;
@@ -134,6 +139,7 @@ struct tipc_node {
 };
 
 struct tipc_node *tipc_node_find(struct net *net, u32 addr);
+void tipc_node_put(struct tipc_node *node);
 struct tipc_node *tipc_node_create(struct net *net, u32 addr);
 void tipc_node_stop(struct net *net);
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
@@ -168,10 +174,12 @@ static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
 
        node = tipc_node_find(net, addr);
 
-       if (likely(node))
+       if (likely(node)) {
                mtu = node->act_mtus[selector & 1];
-       else
+               tipc_node_put(node);
+       } else {
                mtu = MAX_MSG_SIZE;
+       }
 
        return mtu;
 }
index eadd4ed459051ddc776a82f4f2fcc9fe34ab89cd..ab6183cdb12113565e3575e746470d5564f2ce22 100644 (file)
 #include "core.h"
 #include "socket.h"
 #include <net/sock.h>
+#include <linux/module.h>
 
 /* Number of messages to send before rescheduling */
 #define MAX_SEND_MSG_COUNT     25
 #define MAX_RECV_MSG_COUNT     25
 #define CF_CONNECTED           1
+#define CF_SERVER              2
 
 #define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)
 
@@ -88,9 +90,19 @@ static void tipc_clean_outqueues(struct tipc_conn *con);
 static void tipc_conn_kref_release(struct kref *kref)
 {
        struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
+       struct sockaddr_tipc *saddr = con->server->saddr;
+       struct socket *sock = con->sock;
+       struct sock *sk;
 
-       if (con->sock) {
-               tipc_sock_release_local(con->sock);
+       if (sock) {
+               sk = sock->sk;
+               if (test_bit(CF_SERVER, &con->flags)) {
+                       __module_get(sock->ops->owner);
+                       __module_get(sk->sk_prot_creator->owner);
+               }
+               saddr->scope = -TIPC_NODE_SCOPE;
+               kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
+               sk_release_kernel(sk);
                con->sock = NULL;
        }
 
@@ -281,7 +293,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
        struct tipc_conn *newcon;
        int ret;
 
-       ret = tipc_sock_accept_local(sock, &newsock, O_NONBLOCK);
+       ret = kernel_accept(sock, &newsock, O_NONBLOCK);
        if (ret < 0)
                return ret;
 
@@ -309,9 +321,12 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
        struct socket *sock = NULL;
        int ret;
 
-       ret = tipc_sock_create_local(s->net, s->type, &sock);
+       ret = sock_create_kern(AF_TIPC, SOCK_SEQPACKET, 0, &sock);
        if (ret < 0)
                return NULL;
+
+       sk_change_net(sock->sk, s->net);
+
        ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
                                (char *)&s->imp, sizeof(s->imp));
        if (ret < 0)
@@ -337,11 +352,31 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
                pr_err("Unknown socket type %d\n", s->type);
                goto create_err;
        }
+
+       /* As server's listening socket owner and creator is the same module,
+        * we have to decrease TIPC module reference count to guarantee that
+        * it remains zero after the server socket is created, otherwise,
+        * executing "rmmod" command is unable to make TIPC module deleted
+        * after TIPC module is inserted successfully.
+        *
+        * However, the reference count is ever increased twice in
+        * sock_create_kern(): one is to increase the reference count of owner
+        * of TIPC socket's proto_ops struct; another is to increment the
+        * reference count of owner of TIPC proto struct. Therefore, we must
+        * decrement the module reference count twice to ensure that it keeps
+        * zero after server's listening socket is created. Of course, we
+        * must bump the module reference count twice as well before the socket
+        * is closed.
+        */
+       module_put(sock->ops->owner);
+       module_put(sock->sk->sk_prot_creator->owner);
+       set_bit(CF_SERVER, &con->flags);
+
        return sock;
 
 create_err:
-       sock_release(sock);
-       con->sock = NULL;
+       kernel_sock_shutdown(sock, SHUT_RDWR);
+       sk_release_kernel(sock->sk);
        return NULL;
 }
 
index b4d4467d0bb051b09243c62aae2b3b2dca6d90a6..ee90d74d7516e93af0587f5898d81e41f870e267 100644 (file)
@@ -35,7 +35,6 @@
  */
 
 #include <linux/rhashtable.h>
-#include <linux/jhash.h>
 #include "core.h"
 #include "name_table.h"
 #include "node.h"
@@ -74,6 +73,7 @@
  * @link_cong: non-zero if owner must sleep because of link congestion
  * @sent_unacked: # messages sent by socket, and not yet acked by peer
  * @rcv_unacked: # messages read by user, but not yet acked back to peer
+ * @remote: 'connected' peer for dgram/rdm
  * @node: hash table node
  * @rcu: rcu struct for tipc_sock
  */
@@ -96,6 +96,7 @@ struct tipc_sock {
        bool link_cong;
        uint sent_unacked;
        uint rcv_unacked;
+       struct sockaddr_tipc remote;
        struct rhash_head node;
        struct rcu_head rcu;
 };
@@ -114,13 +115,14 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
 static int tipc_sk_insert(struct tipc_sock *tsk);
 static void tipc_sk_remove(struct tipc_sock *tsk);
+static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
+                             size_t dsz);
+static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
 static const struct proto_ops msg_ops;
-
 static struct proto tipc_proto;
-static struct proto tipc_proto_kern;
 
 static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
        [TIPC_NLA_SOCK_UNSPEC]          = { .type = NLA_UNSPEC },
@@ -130,6 +132,8 @@ static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
        [TIPC_NLA_SOCK_HAS_PUBL]        = { .type = NLA_FLAG }
 };
 
+static const struct rhashtable_params tsk_rht_params;
+
 /*
  * Revised TIPC socket locking policy:
  *
@@ -338,11 +342,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        }
 
        /* Allocate socket's protocol area */
-       if (!kern)
-               sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
-       else
-               sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern);
-
+       sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
        if (sk == NULL)
                return -ENOMEM;
 
@@ -380,75 +380,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        return 0;
 }
 
-/**
- * tipc_sock_create_local - create TIPC socket from inside TIPC module
- * @type: socket type - SOCK_RDM or SOCK_SEQPACKET
- *
- * We cannot use sock_creat_kern here because it bumps module user count.
- * Since socket owner and creator is the same module we must make sure
- * that module count remains zero for module local sockets, otherwise
- * we cannot do rmmod.
- *
- * Returns 0 on success, errno otherwise
- */
-int tipc_sock_create_local(struct net *net, int type, struct socket **res)
-{
-       int rc;
-
-       rc = sock_create_lite(AF_TIPC, type, 0, res);
-       if (rc < 0) {
-               pr_err("Failed to create kernel socket\n");
-               return rc;
-       }
-       tipc_sk_create(net, *res, 0, 1);
-
-       return 0;
-}
-
-/**
- * tipc_sock_release_local - release socket created by tipc_sock_create_local
- * @sock: the socket to be released.
- *
- * Module reference count is not incremented when such sockets are created,
- * so we must keep it from being decremented when they are released.
- */
-void tipc_sock_release_local(struct socket *sock)
-{
-       tipc_release(sock);
-       sock->ops = NULL;
-       sock_release(sock);
-}
-
-/**
- * tipc_sock_accept_local - accept a connection on a socket created
- * with tipc_sock_create_local. Use this function to avoid that
- * module reference count is inadvertently incremented.
- *
- * @sock:    the accepting socket
- * @newsock: reference to the new socket to be created
- * @flags:   socket flags
- */
-
-int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
-                          int flags)
-{
-       struct sock *sk = sock->sk;
-       int ret;
-
-       ret = sock_create_lite(sk->sk_family, sk->sk_type,
-                              sk->sk_protocol, newsock);
-       if (ret < 0)
-               return ret;
-
-       ret = tipc_accept(sock, *newsock, flags);
-       if (ret < 0) {
-               sock_release(*newsock);
-               return ret;
-       }
-       (*newsock)->ops = sock->ops;
-       return ret;
-}
-
 static void tipc_sk_callback(struct rcu_head *head)
 {
        struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
@@ -892,7 +823,6 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
 
 /**
  * tipc_sendmsg - send message in connectionless manner
- * @iocb: if NULL, indicates that socket lock is already held
  * @sock: socket structure
  * @m: message to send
  * @dsz: amount of user data to be sent
@@ -904,8 +834,20 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
  *
  * Returns the number of bytes sent on success, or errno otherwise
  */
-static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
+static int tipc_sendmsg(struct socket *sock,
                        struct msghdr *m, size_t dsz)
+{
+       struct sock *sk = sock->sk;
+       int ret;
+
+       lock_sock(sk);
+       ret = __tipc_sendmsg(sock, m, dsz);
+       release_sock(sk);
+
+       return ret;
+}
+
+static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
 {
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        struct sock *sk = sock->sk;
@@ -915,49 +857,40 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
        u32 dnode, dport;
        struct sk_buff_head *pktchain = &sk->sk_write_queue;
        struct sk_buff *skb;
-       struct tipc_name_seq *seq = &dest->addr.nameseq;
+       struct tipc_name_seq *seq;
        struct iov_iter save;
        u32 mtu;
        long timeo;
        int rc;
 
-       if (unlikely(!dest))
-               return -EDESTADDRREQ;
-
-       if (unlikely((m->msg_namelen < sizeof(*dest)) ||
-                    (dest->family != AF_TIPC)))
-               return -EINVAL;
-
        if (dsz > TIPC_MAX_USER_MSG_SIZE)
                return -EMSGSIZE;
-
-       if (iocb)
-               lock_sock(sk);
-
+       if (unlikely(!dest)) {
+               if (tsk->connected && sock->state == SS_READY)
+                       dest = &tsk->remote;
+               else
+                       return -EDESTADDRREQ;
+       } else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
+                  dest->family != AF_TIPC) {
+               return -EINVAL;
+       }
        if (unlikely(sock->state != SS_READY)) {
-               if (sock->state == SS_LISTENING) {
-                       rc = -EPIPE;
-                       goto exit;
-               }
-               if (sock->state != SS_UNCONNECTED) {
-                       rc = -EISCONN;
-                       goto exit;
-               }
-               if (tsk->published) {
-                       rc = -EOPNOTSUPP;
-                       goto exit;
-               }
+               if (sock->state == SS_LISTENING)
+                       return -EPIPE;
+               if (sock->state != SS_UNCONNECTED)
+                       return -EISCONN;
+               if (tsk->published)
+                       return -EOPNOTSUPP;
                if (dest->addrtype == TIPC_ADDR_NAME) {
                        tsk->conn_type = dest->addr.name.name.type;
                        tsk->conn_instance = dest->addr.name.name.instance;
                }
        }
-
+       seq = &dest->addr.nameseq;
        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
 
        if (dest->addrtype == TIPC_ADDR_MCAST) {
-               rc = tipc_sendmcast(sock, seq, m, dsz, timeo);
-               goto exit;
+               return tipc_sendmcast(sock, seq, m, dsz, timeo);
        } else if (dest->addrtype == TIPC_ADDR_NAME) {
                u32 type = dest->addr.name.name.type;
                u32 inst = dest->addr.name.name.instance;
@@ -972,10 +905,8 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
                dport = tipc_nametbl_translate(net, type, inst, &dnode);
                msg_set_destnode(mhdr, dnode);
                msg_set_destport(mhdr, dport);
-               if (unlikely(!dport && !dnode)) {
-                       rc = -EHOSTUNREACH;
-                       goto exit;
-               }
+               if (unlikely(!dport && !dnode))
+                       return -EHOSTUNREACH;
        } else if (dest->addrtype == TIPC_ADDR_ID) {
                dnode = dest->addr.id.node;
                msg_set_type(mhdr, TIPC_DIRECT_MSG);
@@ -990,7 +921,7 @@ new_mtu:
        mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
        rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
        if (rc < 0)
-               goto exit;
+               return rc;
 
        do {
                skb = skb_peek(pktchain);
@@ -1013,9 +944,6 @@ new_mtu:
                if (rc)
                        __skb_queue_purge(pktchain);
        } while (!rc);
-exit:
-       if (iocb)
-               release_sock(sk);
 
        return rc;
 }
@@ -1052,7 +980,6 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
 
 /**
  * tipc_send_stream - send stream-oriented data
- * @iocb: (unused)
  * @sock: socket structure
  * @m: data to send
  * @dsz: total length of data to be transmitted
@@ -1062,8 +989,19 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
  * Returns the number of bytes sent on success (or partial success),
  * or errno if no data sent
  */
-static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *m, size_t dsz)
+static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
+{
+       struct sock *sk = sock->sk;
+       int ret;
+
+       lock_sock(sk);
+       ret = __tipc_send_stream(sock, m, dsz);
+       release_sock(sk);
+
+       return ret;
+}
+
+static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
 {
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
@@ -1080,7 +1018,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
 
        /* Handle implied connection establishment */
        if (unlikely(dest)) {
-               rc = tipc_sendmsg(iocb, sock, m, dsz);
+               rc = __tipc_sendmsg(sock, m, dsz);
                if (dsz && (dsz == rc))
                        tsk->sent_unacked = 1;
                return rc;
@@ -1088,15 +1026,11 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
        if (dsz > (uint)INT_MAX)
                return -EMSGSIZE;
 
-       if (iocb)
-               lock_sock(sk);
-
        if (unlikely(sock->state != SS_CONNECTED)) {
                if (sock->state == SS_DISCONNECTING)
-                       rc = -EPIPE;
+                       return -EPIPE;
                else
-                       rc = -ENOTCONN;
-               goto exit;
+                       return -ENOTCONN;
        }
 
        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
@@ -1108,7 +1042,7 @@ next:
        send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
        rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
        if (unlikely(rc < 0))
-               goto exit;
+               return rc;
        do {
                if (likely(!tsk_conn_cong(tsk))) {
                        rc = tipc_link_xmit(net, pktchain, dnode, portid);
@@ -1133,15 +1067,12 @@ next:
                if (rc)
                        __skb_queue_purge(pktchain);
        } while (!rc);
-exit:
-       if (iocb)
-               release_sock(sk);
+
        return sent ? sent : rc;
 }
 
 /**
  * tipc_send_packet - send a connection-oriented message
- * @iocb: if NULL, indicates that socket lock is already held
  * @sock: socket structure
  * @m: message to send
  * @dsz: length of data to be transmitted
@@ -1150,13 +1081,12 @@ exit:
  *
  * Returns the number of bytes sent on success, or errno otherwise
  */
-static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *m, size_t dsz)
+static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
 {
        if (dsz > TIPC_MAX_USER_MSG_SIZE)
                return -EMSGSIZE;
 
-       return tipc_send_stream(iocb, sock, m, dsz);
+       return tipc_send_stream(sock, m, dsz);
 }
 
 /* tipc_sk_finish_conn - complete the setup of a connection
@@ -1317,12 +1247,12 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
                err = 0;
                if (!skb_queue_empty(&sk->sk_receive_queue))
                        break;
-               err = sock_intr_errno(timeo);
-               if (signal_pending(current))
-                       break;
                err = -EAGAIN;
                if (!timeo)
                        break;
+               err = sock_intr_errno(timeo);
+               if (signal_pending(current))
+                       break;
        }
        finish_wait(sk_sleep(sk), &wait);
        *timeop = timeo;
@@ -1331,7 +1261,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
 
 /**
  * tipc_recvmsg - receive packet-oriented message
- * @iocb: (unused)
  * @m: descriptor for message info
  * @buf_len: total size of user buffer area
  * @flags: receive flags
@@ -1341,8 +1270,8 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
  *
  * Returns size of returned message data, errno otherwise
  */
-static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
+                       int flags)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
@@ -1426,7 +1355,6 @@ exit:
 
 /**
  * tipc_recv_stream - receive stream-oriented data
- * @iocb: (unused)
  * @m: descriptor for message info
  * @buf_len: total size of user buffer area
  * @flags: receive flags
@@ -1436,8 +1364,8 @@ exit:
  *
  * Returns size of returned message data, errno otherwise
  */
-static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
+                           size_t buf_len, int flags)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
@@ -1909,17 +1837,26 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
                        int destlen, int flags)
 {
        struct sock *sk = sock->sk;
+       struct tipc_sock *tsk = tipc_sk(sk);
        struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
        struct msghdr m = {NULL,};
-       long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
+       long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
        socket_state previous;
-       int res;
+       int res = 0;
 
        lock_sock(sk);
 
-       /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
+       /* DGRAM/RDM connect(), just save the destaddr */
        if (sock->state == SS_READY) {
-               res = -EOPNOTSUPP;
+               if (dst->family == AF_UNSPEC) {
+                       memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc));
+                       tsk->connected = 0;
+               } else if (destlen != sizeof(struct sockaddr_tipc)) {
+                       res = -EINVAL;
+               } else {
+                       memcpy(&tsk->remote, dest, destlen);
+                       tsk->connected = 1;
+               }
                goto exit;
        }
 
@@ -1947,7 +1884,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
                if (!timeout)
                        m.msg_flags = MSG_DONTWAIT;
 
-               res = tipc_sendmsg(NULL, sock, &m, 0);
+               res = __tipc_sendmsg(sock, &m, 0);
                if ((res < 0) && (res != -EWOULDBLOCK))
                        goto exit;
 
@@ -2027,12 +1964,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
                err = -EINVAL;
                if (sock->state != SS_LISTENING)
                        break;
-               err = sock_intr_errno(timeo);
-               if (signal_pending(current))
-                       break;
                err = -EAGAIN;
                if (!timeo)
                        break;
+               err = sock_intr_errno(timeo);
+               if (signal_pending(current))
+                       break;
        }
        finish_wait(sk_sleep(sk), &wait);
        return err;
@@ -2103,7 +2040,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
                struct msghdr m = {NULL,};
 
                tsk_advance_rx_queue(sk);
-               tipc_send_packet(NULL, new_sock, &m, 0);
+               __tipc_send_stream(new_sock, &m, 0);
        } else {
                __skb_dequeue(&sk->sk_receive_queue);
                __skb_queue_head(&new_sk->sk_receive_queue, buf);
@@ -2154,7 +2091,6 @@ restart:
                                             TIPC_CONN_SHUTDOWN))
                                tipc_link_xmit_skb(net, skb, dnode,
                                                   tsk->portid);
-                       tipc_node_remove_conn(net, dnode, tsk->portid);
                } else {
                        dnode = tsk_peer_node(tsk);
 
@@ -2312,7 +2248,7 @@ static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
        struct tipc_sock *tsk;
 
        rcu_read_lock();
-       tsk = rhashtable_lookup(&tn->sk_rht, &portid);
+       tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
        if (tsk)
                sock_hold(&tsk->sk);
        rcu_read_unlock();
@@ -2334,7 +2270,8 @@ static int tipc_sk_insert(struct tipc_sock *tsk)
                        portid = TIPC_MIN_PORT;
                tsk->portid = portid;
                sock_hold(&tsk->sk);
-               if (rhashtable_lookup_insert(&tn->sk_rht, &tsk->node))
+               if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
+                                                  tsk_rht_params))
                        return 0;
                sock_put(&tsk->sk);
        }
@@ -2347,26 +2284,27 @@ static void tipc_sk_remove(struct tipc_sock *tsk)
        struct sock *sk = &tsk->sk;
        struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
 
-       if (rhashtable_remove(&tn->sk_rht, &tsk->node)) {
+       if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
 }
 
+static const struct rhashtable_params tsk_rht_params = {
+       .nelem_hint = 192,
+       .head_offset = offsetof(struct tipc_sock, node),
+       .key_offset = offsetof(struct tipc_sock, portid),
+       .key_len = sizeof(u32), /* portid */
+       .max_size = 1048576,
+       .min_size = 256,
+       .automatic_shrinking = true,
+};
+
 int tipc_sk_rht_init(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct rhashtable_params rht_params = {
-               .nelem_hint = 192,
-               .head_offset = offsetof(struct tipc_sock, node),
-               .key_offset = offsetof(struct tipc_sock, portid),
-               .key_len = sizeof(u32), /* portid */
-               .hashfn = jhash,
-               .max_shift = 20, /* 1M */
-               .min_shift = 8,  /* 256 */
-       };
 
-       return rhashtable_init(&tn->sk_rht, &rht_params);
+       return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
 }
 
 void tipc_sk_rht_destroy(struct net *net)
@@ -2609,12 +2547,6 @@ static struct proto tipc_proto = {
        .sysctl_rmem    = sysctl_tipc_rmem
 };
 
-static struct proto tipc_proto_kern = {
-       .name           = "TIPC",
-       .obj_size       = sizeof(struct tipc_sock),
-       .sysctl_rmem    = sysctl_tipc_rmem
-};
-
 /**
  * tipc_socket_init - initialize TIPC socket interface
  *
index 238f1b7bd9bdb09bd0bfbd745145a8f23580c001..bf6551389522dfda37fb0eff4bb5d15221bb2b91 100644 (file)
                                  SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 int tipc_socket_init(void);
 void tipc_socket_stop(void);
-int tipc_sock_create_local(struct net *net, int type, struct socket **res);
-void tipc_sock_release_local(struct socket *sock);
-int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
-                          int flags);
 int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                       struct sk_buff_head *inputq);
index 72c339e432aa6693a30957060b022bb2bc8f8785..1c147c869c2e68312ee02b5275589ba0e18f7727 100644 (file)
@@ -162,19 +162,6 @@ static void subscr_del(struct tipc_subscription *sub)
        atomic_dec(&tn->subscription_count);
 }
 
-/**
- * subscr_terminate - terminate communication with a subscriber
- *
- * Note: Must call it in process context since it might sleep.
- */
-static void subscr_terminate(struct tipc_subscription *sub)
-{
-       struct tipc_subscriber *subscriber = sub->subscriber;
-       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
-
-       tipc_conn_terminate(tn->topsrv, subscriber->conid);
-}
-
 static void subscr_release(struct tipc_subscriber *subscriber)
 {
        struct tipc_subscription *sub;
@@ -312,16 +299,14 @@ static void subscr_conn_msg_event(struct net *net, int conid,
 {
        struct tipc_subscriber *subscriber = usr_data;
        struct tipc_subscription *sub = NULL;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        spin_lock_bh(&subscriber->lock);
-       if (subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber,
-                            &sub) < 0) {
-               spin_unlock_bh(&subscriber->lock);
-               subscr_terminate(sub);
-               return;
-       }
+       subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub);
        if (sub)
                tipc_nametbl_subscribe(sub);
+       else
+               tipc_conn_terminate(tn->topsrv, subscriber->conid);
        spin_unlock_bh(&subscriber->lock);
 }
 
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
new file mode 100644 (file)
index 0000000..ef3d7aa
--- /dev/null
@@ -0,0 +1,446 @@
+/* net/tipc/udp_media.c: IP bearer support for TIPC
+ *
+ * Copyright (c) 2015, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/inet.h>
+#include <linux/inetdevice.h>
+#include <linux/igmp.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/udp_tunnel.h>
+#include <net/addrconf.h>
+#include <linux/tipc_netlink.h>
+#include "core.h"
+#include "bearer.h"
+
+/* IANA assigned UDP port */
+#define UDP_PORT_DEFAULT       6118
+
+static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
+       [TIPC_NLA_UDP_UNSPEC]   = {.type = NLA_UNSPEC},
+       [TIPC_NLA_UDP_LOCAL]    = {.type = NLA_BINARY,
+                                  .len = sizeof(struct sockaddr_storage)},
+       [TIPC_NLA_UDP_REMOTE]   = {.type = NLA_BINARY,
+                                  .len = sizeof(struct sockaddr_storage)},
+};
+
+/**
+ * struct udp_media_addr - IP/UDP addressing information
+ *
+ * This is the bearer level originating address used in neighbor discovery
+ * messages, and all fields should be in network byte order
+ */
+struct udp_media_addr {
+       __be16  proto;
+       __be16  udp_port;
+       union {
+               struct in_addr ipv4;
+               struct in6_addr ipv6;
+       };
+};
+
+/**
+ * struct udp_bearer - ip/udp bearer data structure
+ * @bearer:    associated generic tipc bearer
+ * @ubsock:    bearer associated socket
+ * @ifindex:   local address scope
+ * @work:      used to schedule deferred work on a bearer
+ */
+struct udp_bearer {
+       struct tipc_bearer __rcu *bearer;
+       struct socket *ubsock;
+       u32 ifindex;
+       struct work_struct work;
+};
+
+/* udp_media_addr_set - convert a ip/udp address to a TIPC media address */
+static void tipc_udp_media_addr_set(struct tipc_media_addr *addr,
+                                   struct udp_media_addr *ua)
+{
+       memset(addr, 0, sizeof(struct tipc_media_addr));
+       addr->media_id = TIPC_MEDIA_TYPE_UDP;
+       memcpy(addr->value, ua, sizeof(struct udp_media_addr));
+       if (ntohs(ua->proto) == ETH_P_IP) {
+               if (ipv4_is_multicast(ua->ipv4.s_addr))
+                       addr->broadcast = 1;
+       } else if (ntohs(ua->proto) == ETH_P_IPV6) {
+               if (ipv6_addr_type(&ua->ipv6) & IPV6_ADDR_MULTICAST)
+                       addr->broadcast = 1;
+       } else {
+               pr_err("Invalid UDP media address\n");
+       }
+}
+
+/* tipc_udp_addr2str - convert ip/udp address to string */
+static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size)
+{
+       struct udp_media_addr *ua = (struct udp_media_addr *)&a->value;
+
+       if (ntohs(ua->proto) == ETH_P_IP)
+               snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->udp_port));
+       else if (ntohs(ua->proto) == ETH_P_IPV6)
+               snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->udp_port));
+       else
+               pr_err("Invalid UDP media address\n");
+       return 0;
+}
+
+/* tipc_udp_msg2addr - extract an ip/udp address from a TIPC ndisc message */
+static int tipc_udp_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *a,
+                            char *msg)
+{
+       struct udp_media_addr *ua;
+
+       ua = (struct udp_media_addr *) (msg + TIPC_MEDIA_ADDR_OFFSET);
+       if (msg[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_UDP)
+               return -EINVAL;
+       tipc_udp_media_addr_set(a, ua);
+       return 0;
+}
+
+/* tipc_udp_addr2msg - write an ip/udp address to a TIPC ndisc message */
+static int tipc_udp_addr2msg(char *msg, struct tipc_media_addr *a)
+{
+       memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
+       msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_UDP;
+       memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, a->value,
+              sizeof(struct udp_media_addr));
+       return 0;
+}
+
+/* tipc_send_msg - enqueue a send request */
+static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
+                            struct tipc_bearer *b,
+                            struct tipc_media_addr *dest)
+{
+       int ttl, err = 0;
+       struct udp_bearer *ub;
+       struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value;
+       struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
+       struct sk_buff *clone;
+       struct rtable *rt;
+
+       clone = skb_clone(skb, GFP_ATOMIC);
+       skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
+       ub = rcu_dereference_rtnl(b->media_ptr);
+       if (!ub) {
+               err = -ENODEV;
+               goto tx_error;
+       }
+       if (dst->proto == htons(ETH_P_IP)) {
+               struct flowi4 fl = {
+                       .daddr = dst->ipv4.s_addr,
+                       .saddr = src->ipv4.s_addr,
+                       .flowi4_mark = clone->mark,
+                       .flowi4_proto = IPPROTO_UDP
+               };
+               rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt)) {
+                       err = PTR_ERR(rt);
+                       goto tx_error;
+               }
+               ttl = ip4_dst_hoplimit(&rt->dst);
+               err = udp_tunnel_xmit_skb(rt, clone, src->ipv4.s_addr,
+                                         dst->ipv4.s_addr, 0, ttl, 0,
+                                         src->udp_port, dst->udp_port,
+                                         false, true);
+               if (err < 0) {
+                       ip_rt_put(rt);
+                       goto tx_error;
+               }
+#if IS_ENABLED(CONFIG_IPV6)
+       } else {
+               struct dst_entry *ndst;
+               struct flowi6 fl6 = {
+                       .flowi6_oif = ub->ifindex,
+                       .daddr = dst->ipv6,
+                       .saddr = src->ipv6,
+                       .flowi6_proto = IPPROTO_UDP
+               };
+               err = ipv6_stub->ipv6_dst_lookup(ub->ubsock->sk, &ndst, &fl6);
+               if (err)
+                       goto tx_error;
+               ttl = ip6_dst_hoplimit(ndst);
+               err = udp_tunnel6_xmit_skb(ndst, clone, ndst->dev, &src->ipv6,
+                                          &dst->ipv6, 0, ttl, src->udp_port,
+                                          dst->udp_port, false);
+#endif
+       }
+       return err;
+
+tx_error:
+       kfree_skb(clone);
+       return err;
+}
+
+/* tipc_udp_recv - read data from bearer socket */
+static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
+{
+       struct udp_bearer *ub;
+       struct tipc_bearer *b;
+
+       ub = rcu_dereference_sk_user_data(sk);
+       if (!ub) {
+               pr_err_ratelimited("Failed to get UDP bearer reference");
+               kfree_skb(skb);
+               return 0;
+       }
+
+       skb_pull(skb, sizeof(struct udphdr));
+       rcu_read_lock();
+       b = rcu_dereference_rtnl(ub->bearer);
+
+       if (b) {
+               tipc_rcv(sock_net(sk), skb, b);
+               rcu_read_unlock();
+               return 0;
+       }
+       rcu_read_unlock();
+       kfree_skb(skb);
+       return 0;
+}
+
+static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote)
+{
+       int err = 0;
+       struct ip_mreqn mreqn;
+       struct sock *sk = ub->ubsock->sk;
+
+       if (ntohs(remote->proto) == ETH_P_IP) {
+               if (!ipv4_is_multicast(remote->ipv4.s_addr))
+                       return 0;
+               mreqn.imr_multiaddr = remote->ipv4;
+               mreqn.imr_ifindex = ub->ifindex;
+               err = ip_mc_join_group(sk, &mreqn);
+#if IS_ENABLED(CONFIG_IPV6)
+       } else {
+               if (!ipv6_addr_is_multicast(&remote->ipv6))
+                       return 0;
+               err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex,
+                                                  &remote->ipv6);
+#endif
+       }
+       return err;
+}
+
+/**
+ * parse_options - build local/remote addresses from configuration
+ * @attrs:     netlink config data
+ * @ub:                UDP bearer instance
+ * @local:     local bearer IP address/port
+ * @remote:    peer or multicast IP/port
+ */
+static int parse_options(struct nlattr *attrs[], struct udp_bearer *ub,
+                        struct udp_media_addr *local,
+                        struct udp_media_addr *remote)
+{
+       struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
+       struct sockaddr_storage *sa_local, *sa_remote;
+
+       if (!attrs[TIPC_NLA_BEARER_UDP_OPTS])
+               goto err;
+       if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX,
+                            attrs[TIPC_NLA_BEARER_UDP_OPTS],
+                            tipc_nl_udp_policy))
+               goto err;
+       if (opts[TIPC_NLA_UDP_LOCAL] && opts[TIPC_NLA_UDP_REMOTE]) {
+               sa_local = nla_data(opts[TIPC_NLA_UDP_LOCAL]);
+               sa_remote = nla_data(opts[TIPC_NLA_UDP_REMOTE]);
+       } else {
+err:
+               pr_err("Invalid UDP bearer configuration");
+               return -EINVAL;
+       }
+       if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET) {
+               struct sockaddr_in *ip4;
+
+               ip4 = (struct sockaddr_in *)sa_local;
+               local->proto = htons(ETH_P_IP);
+               local->udp_port = ip4->sin_port;
+               local->ipv4.s_addr = ip4->sin_addr.s_addr;
+
+               ip4 = (struct sockaddr_in *)sa_remote;
+               remote->proto = htons(ETH_P_IP);
+               remote->udp_port = ip4->sin_port;
+               remote->ipv4.s_addr = ip4->sin_addr.s_addr;
+               return 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET6) {
+               struct sockaddr_in6 *ip6;
+
+               ip6 = (struct sockaddr_in6 *)sa_local;
+               local->proto = htons(ETH_P_IPV6);
+               local->udp_port = ip6->sin6_port;
+               local->ipv6 = ip6->sin6_addr;
+               ub->ifindex = ip6->sin6_scope_id;
+
+               ip6 = (struct sockaddr_in6 *)sa_remote;
+               remote->proto = htons(ETH_P_IPV6);
+               remote->udp_port = ip6->sin6_port;
+               remote->ipv6 = ip6->sin6_addr;
+               return 0;
+#endif
+       }
+       return -EADDRNOTAVAIL;
+}
+
+/**
+ * tipc_udp_enable - callback to create a new udp bearer instance
+ * @net:       network namespace
+ * @b:         pointer to generic tipc_bearer
+ * @attrs:     netlink bearer configuration
+ *
+ * validate the bearer parameters and initialize the udp bearer
+ * rtnl_lock should be held
+ */
+static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
+                          struct nlattr *attrs[])
+{
+       int err = -EINVAL;
+       struct udp_bearer *ub;
+       struct udp_media_addr *remote;
+       struct udp_media_addr local = {0};
+       struct udp_port_cfg udp_conf = {0};
+       struct udp_tunnel_sock_cfg tuncfg = {NULL};
+
+       ub = kzalloc(sizeof(*ub), GFP_ATOMIC);
+       if (!ub)
+               return -ENOMEM;
+
+       remote = (struct udp_media_addr *)&b->bcast_addr.value;
+       memset(remote, 0, sizeof(struct udp_media_addr));
+       err = parse_options(attrs, ub, &local, remote);
+       if (err)
+               goto err;
+
+       b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP;
+       b->bcast_addr.broadcast = 1;
+       rcu_assign_pointer(b->media_ptr, ub);
+       rcu_assign_pointer(ub->bearer, b);
+       tipc_udp_media_addr_set(&b->addr, &local);
+       if (local.proto == htons(ETH_P_IP)) {
+               struct net_device *dev;
+
+               dev = __ip_dev_find(net, local.ipv4.s_addr, false);
+               if (!dev) {
+                       err = -ENODEV;
+                       goto err;
+               }
+               udp_conf.family = AF_INET;
+               udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+               udp_conf.use_udp_checksums = false;
+               ub->ifindex = dev->ifindex;
+               b->mtu = dev->mtu - sizeof(struct iphdr)
+                       - sizeof(struct udphdr);
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (local.proto == htons(ETH_P_IPV6)) {
+               udp_conf.family = AF_INET6;
+               udp_conf.use_udp6_tx_checksums = true;
+               udp_conf.use_udp6_rx_checksums = true;
+               udp_conf.local_ip6 = in6addr_any;
+               b->mtu = 1280;
+#endif
+       } else {
+               err = -EAFNOSUPPORT;
+               goto err;
+       }
+       udp_conf.local_udp_port = local.udp_port;
+       err = udp_sock_create(net, &udp_conf, &ub->ubsock);
+       if (err)
+               goto err;
+       tuncfg.sk_user_data = ub;
+       tuncfg.encap_type = 1;
+       tuncfg.encap_rcv = tipc_udp_recv;
+       tuncfg.encap_destroy = NULL;
+       setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg);
+
+       if (enable_mcast(ub, remote))
+               goto err;
+       return 0;
+err:
+       kfree(ub);
+       return err;
+}
+
+/* cleanup_bearer - break the socket/bearer association */
+static void cleanup_bearer(struct work_struct *work)
+{
+       struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
+
+       if (ub->ubsock)
+               udp_tunnel_sock_release(ub->ubsock);
+       synchronize_net();
+       kfree(ub);
+}
+
+/* tipc_udp_disable - detach bearer from socket */
+static void tipc_udp_disable(struct tipc_bearer *b)
+{
+       struct udp_bearer *ub;
+
+       ub = rcu_dereference_rtnl(b->media_ptr);
+       if (!ub) {
+               pr_err("UDP bearer instance not found\n");
+               return;
+       }
+       if (ub->ubsock)
+               sock_set_flag(ub->ubsock->sk, SOCK_DEAD);
+       RCU_INIT_POINTER(b->media_ptr, NULL);
+       RCU_INIT_POINTER(ub->bearer, NULL);
+
+       /* sock_release need to be done outside of rtnl lock */
+       INIT_WORK(&ub->work, cleanup_bearer);
+       schedule_work(&ub->work);
+}
+
+struct tipc_media udp_media_info = {
+       .send_msg       = tipc_udp_send_msg,
+       .enable_media   = tipc_udp_enable,
+       .disable_media  = tipc_udp_disable,
+       .addr2str       = tipc_udp_addr2str,
+       .addr2msg       = tipc_udp_addr2msg,
+       .msg2addr       = tipc_udp_msg2addr,
+       .priority       = TIPC_DEF_LINK_PRI,
+       .tolerance      = TIPC_DEF_LINK_TOL,
+       .window         = TIPC_DEF_LINK_WIN,
+       .type_id        = TIPC_MEDIA_TYPE_UDP,
+       .hwaddr_len     = 0,
+       .name           = "udp"
+};
index 526b6edab018eefde3c77f259c0a5caeff51a6bc..433f287ee548391183c63d80b4db3736454a7f28 100644 (file)
@@ -516,20 +516,15 @@ static unsigned int unix_dgram_poll(struct file *, struct socket *,
                                    poll_table *);
 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 static int unix_shutdown(struct socket *, int);
-static int unix_stream_sendmsg(struct kiocb *, struct socket *,
-                              struct msghdr *, size_t);
-static int unix_stream_recvmsg(struct kiocb *, struct socket *,
-                              struct msghdr *, size_t, int);
-static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
-                             struct msghdr *, size_t);
-static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
-                             struct msghdr *, size_t, int);
+static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
+static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
+static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
+static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
 static int unix_dgram_connect(struct socket *, struct sockaddr *,
                              int, int);
-static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
-                                 struct msghdr *, size_t);
-static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
-                                 struct msghdr *, size_t, int);
+static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
+static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
+                                 int);
 
 static int unix_set_peek_off(struct sock *sk, int val)
 {
@@ -1442,8 +1437,8 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
  *     Send AF_UNIX data.
  */
 
-static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
-                             struct msghdr *msg, size_t len)
+static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+                             size_t len)
 {
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
@@ -1622,8 +1617,8 @@ out:
  */
 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
 
-static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
-                              struct msghdr *msg, size_t len)
+static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+                              size_t len)
 {
        struct sock *sk = sock->sk;
        struct sock *other = NULL;
@@ -1725,8 +1720,8 @@ out_err:
        return sent ? : err;
 }
 
-static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
-                                 struct msghdr *msg, size_t len)
+static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
+                                 size_t len)
 {
        int err;
        struct sock *sk = sock->sk;
@@ -1741,19 +1736,18 @@ static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (msg->msg_namelen)
                msg->msg_namelen = 0;
 
-       return unix_dgram_sendmsg(kiocb, sock, msg, len);
+       return unix_dgram_sendmsg(sock, msg, len);
 }
 
-static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
-                             struct msghdr *msg, size_t size,
-                             int flags)
+static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
+                                 size_t size, int flags)
 {
        struct sock *sk = sock->sk;
 
        if (sk->sk_state != TCP_ESTABLISHED)
                return -ENOTCONN;
 
-       return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
+       return unix_dgram_recvmsg(sock, msg, size, flags);
 }
 
 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
@@ -1766,9 +1760,8 @@ static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
        }
 }
 
-static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
-                             struct msghdr *msg, size_t size,
-                             int flags)
+static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+                             size_t size, int flags)
 {
        struct scm_cookie scm;
        struct sock *sk = sock->sk;
@@ -1900,9 +1893,8 @@ static unsigned int unix_skb_len(const struct sk_buff *skb)
        return skb->len - UNIXCB(skb).consumed;
 }
 
-static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
-                              struct msghdr *msg, size_t size,
-                              int flags)
+static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+                              size_t size, int flags)
 {
        struct scm_cookie scm;
        struct sock *sk = sock->sk;
index 1d0e39c9a3e2a5deb62130bc7fbdc31a4b5288a9..2ec86e652a19802ec7f81e7443d29bbdf468b174 100644 (file)
@@ -949,8 +949,8 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock,
        return mask;
 }
 
-static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
-                              struct msghdr *msg, size_t len)
+static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+                              size_t len)
 {
        int err;
        struct sock *sk;
@@ -1062,11 +1062,10 @@ out:
        return err;
 }
 
-static int vsock_dgram_recvmsg(struct kiocb *kiocb, struct socket *sock,
-                              struct msghdr *msg, size_t len, int flags)
+static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+                              size_t len, int flags)
 {
-       return transport->dgram_dequeue(kiocb, vsock_sk(sock->sk), msg, len,
-                                       flags);
+       return transport->dgram_dequeue(vsock_sk(sock->sk), msg, len, flags);
 }
 
 static const struct proto_ops vsock_dgram_ops = {
@@ -1505,8 +1504,8 @@ static int vsock_stream_getsockopt(struct socket *sock,
        return 0;
 }
 
-static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
-                               struct msghdr *msg, size_t len)
+static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+                               size_t len)
 {
        struct sock *sk;
        struct vsock_sock *vsk;
@@ -1644,9 +1643,8 @@ out:
 
 
 static int
-vsock_stream_recvmsg(struct kiocb *kiocb,
-                    struct socket *sock,
-                    struct msghdr *msg, size_t len, int flags)
+vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                    int flags)
 {
        struct sock *sk;
        struct vsock_sock *vsk;
index 7f3255084a6c036074664240f4da6fb3cdd231ce..c294da095461bcf67f662d751d00d7156c092366 100644 (file)
@@ -1730,8 +1730,7 @@ static int vmci_transport_dgram_enqueue(
        return err - sizeof(*dg);
 }
 
-static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
-                                       struct vsock_sock *vsk,
+static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
                                        struct msghdr *msg, size_t len,
                                        int flags)
 {
index 29c8675f9a1189db65c185f2ad04f96a67702989..b13dfb4ff001908160ba087db5d25377d43c5491 100644 (file)
@@ -178,10 +178,18 @@ config CFG80211_WEXT
        bool "cfg80211 wireless extensions compatibility"
        depends on CFG80211
        select WEXT_CORE
+       default y if CFG80211_WEXT_EXPORT
        help
          Enable this option if you need old userspace for wireless
          extensions with cfg80211-based drivers.
 
+config CFG80211_WEXT_EXPORT
+       bool
+       depends on CFG80211
+       help
+         Drivers should select this option if they require cfg80211's
+         wext compatibility symbols to be exported.
+
 config LIB80211
        tristate
        default n
index e24fc585c8834782295558481f0f592be743e6dc..4c55fab9b4e46c68ee480eb71aec969088aa7920 100644 (file)
@@ -30,7 +30,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
                return;
 
        bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, NULL, 0,
-                              WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+                              IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY_ANY);
 
        if (WARN_ON(!bss))
                return;
@@ -533,7 +533,7 @@ int cfg80211_ibss_wext_giwap(struct net_device *dev,
        else if (wdev->wext.ibss.bssid)
                memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN);
        else
-               memset(ap_addr->sa_data, 0, ETH_ALEN);
+               eth_zero_addr(ap_addr->sa_data);
 
        wdev_unlock(wdev);
 
index 2c52b59e43f319670e6ce054b0eda1bde995ce1c..7aae329e2b4e4a8e3afa943f670852a6e26e89dd 100644 (file)
@@ -229,7 +229,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
                return -EALREADY;
 
        req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
-                                  WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+                                  IEEE80211_BSS_TYPE_ESS,
+                                  IEEE80211_PRIVACY_ANY);
        if (!req.bss)
                return -ENOENT;
 
@@ -296,7 +297,8 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
                                   rdev->wiphy.vht_capa_mod_mask);
 
        req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
-                                   WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+                                   IEEE80211_BSS_TYPE_ESS,
+                                   IEEE80211_PRIVACY_ANY);
        if (!req->bss)
                return -ENOENT;
 
index b6f84f6a2a095ef0c94891f796ef6cf7cf9af65f..6dd1ab3b10ea25c76e20a73998bb5f1ebc41b6b8 100644 (file)
@@ -399,6 +399,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG },
        [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
        [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
+       [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
 };
 
 /* policy for the key attributes */
@@ -1098,8 +1099,6 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
        if (large && nl80211_send_wowlan_tcp_caps(rdev, msg))
                return -ENOBUFS;
 
-       /* TODO: send wowlan net detect */
-
        nla_nest_end(msg, nl_wowlan);
 
        return 0;
@@ -2668,7 +2667,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
 
        wdev = rdev_add_virtual_intf(rdev,
                                nla_data(info->attrs[NL80211_ATTR_IFNAME]),
-                               type, err ? NULL : &flags, &params);
+                               NET_NAME_USER, type, err ? NULL : &flags,
+                               &params);
        if (WARN_ON(!wdev)) {
                nlmsg_free(msg);
                return -EPROTO;
@@ -4968,7 +4968,10 @@ static int parse_reg_rule(struct nlattr *tb[],
 static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
 {
        char *data = NULL;
+       bool is_indoor;
        enum nl80211_user_reg_hint_type user_reg_hint_type;
+       u32 owner_nlportid;
+
 
        /*
         * You should only get this when cfg80211 hasn't yet initialized
@@ -4994,7 +4997,15 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
                data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
                return regulatory_hint_user(data, user_reg_hint_type);
        case NL80211_USER_REG_HINT_INDOOR:
-               return regulatory_hint_indoor_user();
+               if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
+                       owner_nlportid = info->snd_portid;
+                       is_indoor = !!info->attrs[NL80211_ATTR_REG_INDOOR];
+               } else {
+                       owner_nlportid = 0;
+                       is_indoor = true;
+               }
+
+               return regulatory_hint_indoor(is_indoor, owner_nlportid);
        default:
                return -EINVAL;
        }
@@ -5275,7 +5286,7 @@ do {                                                                          \
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration,
                                  0, 65535, mask,
                                  NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16);
-       FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 1, 0xffffffff,
+       FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 0, 0xffffffff,
                                  mask, NL80211_MESHCONF_PLINK_TIMEOUT,
                                  nla_get_u32);
        if (mask_out)
@@ -5693,8 +5704,8 @@ static int nl80211_parse_random_mac(struct nlattr **attrs,
        int i;
 
        if (!attrs[NL80211_ATTR_MAC] && !attrs[NL80211_ATTR_MAC_MASK]) {
-               memset(mac_addr, 0, ETH_ALEN);
-               memset(mac_addr_mask, 0, ETH_ALEN);
+               eth_zero_addr(mac_addr);
+               eth_zero_addr(mac_addr_mask);
                mac_addr[0] = 0x2;
                mac_addr_mask[0] = 0x3;
 
@@ -7275,8 +7286,18 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
                break;
        case NL80211_CHAN_WIDTH_20:
        case NL80211_CHAN_WIDTH_40:
-               if (rdev->wiphy.features & NL80211_FEATURE_HT_IBSS)
-                       break;
+               if (!(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS))
+                       return -EINVAL;
+               break;
+       case NL80211_CHAN_WIDTH_80:
+       case NL80211_CHAN_WIDTH_80P80:
+       case NL80211_CHAN_WIDTH_160:
+               if (!(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS))
+                       return -EINVAL;
+               if (!wiphy_ext_feature_isset(&rdev->wiphy,
+                                            NL80211_EXT_FEATURE_VHT_IBSS))
+                       return -EINVAL;
+               break;
        default:
                return -EINVAL;
        }
@@ -7389,8 +7410,8 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
 
 static struct sk_buff *
 __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
-                           int approxlen, u32 portid, u32 seq,
-                           enum nl80211_commands cmd,
+                           struct wireless_dev *wdev, int approxlen,
+                           u32 portid, u32 seq, enum nl80211_commands cmd,
                            enum nl80211_attrs attr,
                            const struct nl80211_vendor_cmd_info *info,
                            gfp_t gfp)
@@ -7421,6 +7442,16 @@ __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
                        goto nla_put_failure;
        }
 
+       if (wdev) {
+               if (nla_put_u64(skb, NL80211_ATTR_WDEV,
+                               wdev_id(wdev)))
+                       goto nla_put_failure;
+               if (wdev->netdev &&
+                   nla_put_u32(skb, NL80211_ATTR_IFINDEX,
+                               wdev->netdev->ifindex))
+                       goto nla_put_failure;
+       }
+
        data = nla_nest_start(skb, attr);
 
        ((void **)skb->cb)[0] = rdev;
@@ -7435,6 +7466,7 @@ __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
 }
 
 struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+                                          struct wireless_dev *wdev,
                                           enum nl80211_commands cmd,
                                           enum nl80211_attrs attr,
                                           int vendor_event_idx,
@@ -7460,7 +7492,7 @@ struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
                return NULL;
        }
 
-       return __cfg80211_alloc_vendor_skb(rdev, approxlen, 0, 0,
+       return __cfg80211_alloc_vendor_skb(rdev, wdev, approxlen, 0, 0,
                                           cmd, attr, info, gfp);
 }
 EXPORT_SYMBOL(__cfg80211_alloc_event_skb);
@@ -8761,8 +8793,8 @@ static int nl80211_send_wowlan_tcp(struct sk_buff *msg,
        if (!nl_tcp)
                return -ENOBUFS;
 
-       if (nla_put_be32(msg, NL80211_WOWLAN_TCP_SRC_IPV4, tcp->src) ||
-           nla_put_be32(msg, NL80211_WOWLAN_TCP_DST_IPV4, tcp->dst) ||
+       if (nla_put_in_addr(msg, NL80211_WOWLAN_TCP_SRC_IPV4, tcp->src) ||
+           nla_put_in_addr(msg, NL80211_WOWLAN_TCP_DST_IPV4, tcp->dst) ||
            nla_put(msg, NL80211_WOWLAN_TCP_DST_MAC, ETH_ALEN, tcp->dst_mac) ||
            nla_put_u16(msg, NL80211_WOWLAN_TCP_SRC_PORT, tcp->src_port) ||
            nla_put_u16(msg, NL80211_WOWLAN_TCP_DST_PORT, tcp->dst_port) ||
@@ -8808,6 +8840,9 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
        if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL, req->interval))
                return -ENOBUFS;
 
+       if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
+               return -ENOBUFS;
+
        freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
        if (!freqs)
                return -ENOBUFS;
@@ -8993,8 +9028,8 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
        cfg = kzalloc(size, GFP_KERNEL);
        if (!cfg)
                return -ENOMEM;
-       cfg->src = nla_get_be32(tb[NL80211_WOWLAN_TCP_SRC_IPV4]);
-       cfg->dst = nla_get_be32(tb[NL80211_WOWLAN_TCP_DST_IPV4]);
+       cfg->src = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_SRC_IPV4]);
+       cfg->dst = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_DST_IPV4]);
        memcpy(cfg->dst_mac, nla_data(tb[NL80211_WOWLAN_TCP_DST_MAC]),
               ETH_ALEN);
        if (tb[NL80211_WOWLAN_TCP_SRC_PORT])
@@ -9094,6 +9129,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
        const struct wiphy_wowlan_support *wowlan = rdev->wiphy.wowlan;
        int err, i;
        bool prev_enabled = rdev->wiphy.wowlan_config;
+       bool regular = false;
 
        if (!wowlan)
                return -EOPNOTSUPP;
@@ -9121,12 +9157,14 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                if (!(wowlan->flags & WIPHY_WOWLAN_DISCONNECT))
                        return -EINVAL;
                new_triggers.disconnect = true;
+               regular = true;
        }
 
        if (tb[NL80211_WOWLAN_TRIG_MAGIC_PKT]) {
                if (!(wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT))
                        return -EINVAL;
                new_triggers.magic_pkt = true;
+               regular = true;
        }
 
        if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED])
@@ -9136,24 +9174,28 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                if (!(wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE))
                        return -EINVAL;
                new_triggers.gtk_rekey_failure = true;
+               regular = true;
        }
 
        if (tb[NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST]) {
                if (!(wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ))
                        return -EINVAL;
                new_triggers.eap_identity_req = true;
+               regular = true;
        }
 
        if (tb[NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE]) {
                if (!(wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE))
                        return -EINVAL;
                new_triggers.four_way_handshake = true;
+               regular = true;
        }
 
        if (tb[NL80211_WOWLAN_TRIG_RFKILL_RELEASE]) {
                if (!(wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE))
                        return -EINVAL;
                new_triggers.rfkill_release = true;
+               regular = true;
        }
 
        if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) {
@@ -9162,6 +9204,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                int rem, pat_len, mask_len, pkt_offset;
                struct nlattr *pat_tb[NUM_NL80211_PKTPAT];
 
+               regular = true;
+
                nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
                                    rem)
                        n_patterns++;
@@ -9223,6 +9267,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION]) {
+               regular = true;
                err = nl80211_parse_wowlan_tcp(
                        rdev, tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION],
                        &new_triggers);
@@ -9231,6 +9276,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (tb[NL80211_WOWLAN_TRIG_NET_DETECT]) {
+               regular = true;
                err = nl80211_parse_wowlan_nd(
                        rdev, wowlan, tb[NL80211_WOWLAN_TRIG_NET_DETECT],
                        &new_triggers);
@@ -9238,6 +9284,17 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                        goto error;
        }
 
+       /* The 'any' trigger means the device continues operating more or less
+        * as in its normal operation mode and wakes up the host on most of the
+        * normal interrupts (like packet RX, ...)
+        * It therefore makes little sense to combine with the more constrained
+        * wakeup trigger modes.
+        */
+       if (new_triggers.any && regular) {
+               err = -EINVAL;
+               goto error;
+       }
+
        ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL);
        if (!ntrig) {
                err = -ENOMEM;
@@ -9906,7 +9963,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
        if (WARN_ON(!rdev->cur_cmd_info))
                return NULL;
 
-       return __cfg80211_alloc_vendor_skb(rdev, approxlen,
+       return __cfg80211_alloc_vendor_skb(rdev, NULL, approxlen,
                                           rdev->cur_cmd_info->snd_portid,
                                           rdev->cur_cmd_info->snd_seq,
                                           cmd, attr, NULL, GFP_KERNEL);
@@ -12775,6 +12832,11 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 
        rcu_read_unlock();
 
+       /*
+        * It is possible that the user space process that is controlling the
+        * indoor setting disappeared, so notify the regulatory core.
+        */
+       regulatory_netlink_notify(notify->portid);
        return NOTIFY_OK;
 }
 
index 35cfb7134bdbaa2559cd222a8410b4f66f7ea41d..c6e83a7468c0c43baea32bbb0291c312017226f5 100644 (file)
@@ -35,13 +35,14 @@ static inline void rdev_set_wakeup(struct cfg80211_registered_device *rdev,
 
 static inline struct wireless_dev
 *rdev_add_virtual_intf(struct cfg80211_registered_device *rdev, char *name,
+                      unsigned char name_assign_type,
                       enum nl80211_iftype type, u32 *flags,
                       struct vif_params *params)
 {
        struct wireless_dev *ret;
        trace_rdev_add_virtual_intf(&rdev->wiphy, name, type);
-       ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, type, flags,
-                                         params);
+       ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, name_assign_type,
+                                         type, flags, params);
        trace_rdev_return_wdev(&rdev->wiphy, ret);
        return ret;
 }
index 48dfc7b4e98130e4d8d5b265fceadd9004ed4f5e..be5f81caa488bf24a0bfc79c6ffb9ba534d1e4aa 100644 (file)
  *     be intersected with the current one.
  * @REG_REQ_ALREADY_SET: the regulatory request will not change the current
  *     regulatory settings, and no further processing is required.
- * @REG_REQ_USER_HINT_HANDLED: a non alpha2  user hint was handled and no
- *     further processing is required, i.e., not need to update last_request
- *     etc. This should be used for user hints that do not provide an alpha2
- *     but some other type of regulatory hint, i.e., indoor operation.
  */
 enum reg_request_treatment {
        REG_REQ_OK,
        REG_REQ_IGNORE,
        REG_REQ_INTERSECT,
        REG_REQ_ALREADY_SET,
-       REG_REQ_USER_HINT_HANDLED,
 };
 
 static struct regulatory_request core_request_world = {
@@ -133,9 +128,12 @@ static int reg_num_devs_support_basehint;
  * State variable indicating if the platform on which the devices
  * are attached is operating in an indoor environment. The state variable
  * is relevant for all registered devices.
- * (protected by RTNL)
  */
 static bool reg_is_indoor;
+static spinlock_t reg_indoor_lock;
+
+/* Used to track the userspace process controlling the indoor setting */
+static u32 reg_is_indoor_portid;
 
 static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
 {
@@ -554,6 +552,9 @@ reg_call_crda(struct regulatory_request *request)
 {
        if (call_crda(request->alpha2))
                return REG_REQ_IGNORE;
+
+       queue_delayed_work(system_power_efficient_wq,
+                          &reg_timeout, msecs_to_jiffies(3142));
        return REG_REQ_OK;
 }
 
@@ -1248,13 +1249,6 @@ static bool reg_request_cell_base(struct regulatory_request *request)
        return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE;
 }
 
-static bool reg_request_indoor(struct regulatory_request *request)
-{
-       if (request->initiator != NL80211_REGDOM_SET_BY_USER)
-               return false;
-       return request->user_reg_hint_type == NL80211_USER_REG_HINT_INDOOR;
-}
-
 bool reg_last_request_cell_base(void)
 {
        return reg_request_cell_base(get_last_request());
@@ -1800,8 +1794,7 @@ static void reg_set_request_processed(void)
                need_more_processing = true;
        spin_unlock(&reg_requests_lock);
 
-       if (lr->initiator == NL80211_REGDOM_SET_BY_USER)
-               cancel_delayed_work(&reg_timeout);
+       cancel_delayed_work(&reg_timeout);
 
        if (need_more_processing)
                schedule_work(&reg_work);
@@ -1833,11 +1826,6 @@ __reg_process_hint_user(struct regulatory_request *user_request)
 {
        struct regulatory_request *lr = get_last_request();
 
-       if (reg_request_indoor(user_request)) {
-               reg_is_indoor = true;
-               return REG_REQ_USER_HINT_HANDLED;
-       }
-
        if (reg_request_cell_base(user_request))
                return reg_ignore_cell_hint(user_request);
 
@@ -1885,8 +1873,7 @@ reg_process_hint_user(struct regulatory_request *user_request)
 
        treatment = __reg_process_hint_user(user_request);
        if (treatment == REG_REQ_IGNORE ||
-           treatment == REG_REQ_ALREADY_SET ||
-           treatment == REG_REQ_USER_HINT_HANDLED) {
+           treatment == REG_REQ_ALREADY_SET) {
                reg_free_request(user_request);
                return treatment;
        }
@@ -1947,7 +1934,6 @@ reg_process_hint_driver(struct wiphy *wiphy,
        case REG_REQ_OK:
                break;
        case REG_REQ_IGNORE:
-       case REG_REQ_USER_HINT_HANDLED:
                reg_free_request(driver_request);
                return treatment;
        case REG_REQ_INTERSECT:
@@ -2047,7 +2033,6 @@ reg_process_hint_country_ie(struct wiphy *wiphy,
        case REG_REQ_OK:
                break;
        case REG_REQ_IGNORE:
-       case REG_REQ_USER_HINT_HANDLED:
                /* fall through */
        case REG_REQ_ALREADY_SET:
                reg_free_request(country_ie_request);
@@ -2086,11 +2071,8 @@ static void reg_process_hint(struct regulatory_request *reg_request)
        case NL80211_REGDOM_SET_BY_USER:
                treatment = reg_process_hint_user(reg_request);
                if (treatment == REG_REQ_IGNORE ||
-                   treatment == REG_REQ_ALREADY_SET ||
-                   treatment == REG_REQ_USER_HINT_HANDLED)
+                   treatment == REG_REQ_ALREADY_SET)
                        return;
-               queue_delayed_work(system_power_efficient_wq,
-                                  &reg_timeout, msecs_to_jiffies(3142));
                return;
        case NL80211_REGDOM_SET_BY_DRIVER:
                if (!wiphy)
@@ -2177,6 +2159,13 @@ static void reg_process_pending_hints(void)
        }
 
        reg_process_hint(reg_request);
+
+       lr = get_last_request();
+
+       spin_lock(&reg_requests_lock);
+       if (!list_empty(&reg_requests_list) && lr && lr->processed)
+               schedule_work(&reg_work);
+       spin_unlock(&reg_requests_lock);
 }
 
 /* Processes beacon hints -- this has nothing to do with country IEs */
@@ -2309,22 +2298,50 @@ int regulatory_hint_user(const char *alpha2,
        return 0;
 }
 
-int regulatory_hint_indoor_user(void)
+int regulatory_hint_indoor(bool is_indoor, u32 portid)
 {
-       struct regulatory_request *request;
+       spin_lock(&reg_indoor_lock);
 
-       request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
-       if (!request)
-               return -ENOMEM;
+       /* It is possible that more than one user space process is trying to
+        * configure the indoor setting. To handle such cases, clear the indoor
+        * setting in case that some process does not think that the device
+        * is operating in an indoor environment. In addition, if a user space
+        * process indicates that it is controlling the indoor setting, save its
+        * portid, i.e., make it the owner.
+        */
+       reg_is_indoor = is_indoor;
+       if (reg_is_indoor) {
+               if (!reg_is_indoor_portid)
+                       reg_is_indoor_portid = portid;
+       } else {
+               reg_is_indoor_portid = 0;
+       }
 
-       request->wiphy_idx = WIPHY_IDX_INVALID;
-       request->initiator = NL80211_REGDOM_SET_BY_USER;
-       request->user_reg_hint_type = NL80211_USER_REG_HINT_INDOOR;
-       queue_regulatory_request(request);
+       spin_unlock(&reg_indoor_lock);
+
+       if (!is_indoor)
+               reg_check_channels();
 
        return 0;
 }
 
+void regulatory_netlink_notify(u32 portid)
+{
+       spin_lock(&reg_indoor_lock);
+
+       if (reg_is_indoor_portid != portid) {
+               spin_unlock(&reg_indoor_lock);
+               return;
+       }
+
+       reg_is_indoor = false;
+       reg_is_indoor_portid = 0;
+
+       spin_unlock(&reg_indoor_lock);
+
+       reg_check_channels();
+}
+
 /* Driver hints */
 int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
 {
@@ -2486,13 +2503,22 @@ static void restore_regulatory_settings(bool reset_user)
        char alpha2[2];
        char world_alpha2[2];
        struct reg_beacon *reg_beacon, *btmp;
-       struct regulatory_request *reg_request, *tmp;
        LIST_HEAD(tmp_reg_req_list);
        struct cfg80211_registered_device *rdev;
 
        ASSERT_RTNL();
 
-       reg_is_indoor = false;
+       /*
+        * Clear the indoor setting in case that it is not controlled by user
+        * space, as otherwise there is no guarantee that the device is still
+        * operating in an indoor environment.
+        */
+       spin_lock(&reg_indoor_lock);
+       if (reg_is_indoor && !reg_is_indoor_portid) {
+               reg_is_indoor = false;
+               reg_check_channels();
+       }
+       spin_unlock(&reg_indoor_lock);
 
        reset_regdomains(true, &world_regdom);
        restore_alpha2(alpha2, reset_user);
@@ -2504,11 +2530,7 @@ static void restore_regulatory_settings(bool reset_user)
         * settings.
         */
        spin_lock(&reg_requests_lock);
-       list_for_each_entry_safe(reg_request, tmp, &reg_requests_list, list) {
-               if (reg_request->initiator != NL80211_REGDOM_SET_BY_USER)
-                       continue;
-               list_move_tail(&reg_request->list, &tmp_reg_req_list);
-       }
+       list_splice_tail_init(&reg_requests_list, &tmp_reg_req_list);
        spin_unlock(&reg_requests_lock);
 
        /* Clear beacon hints */
@@ -3089,6 +3111,7 @@ int __init regulatory_init(void)
 
        spin_lock_init(&reg_requests_lock);
        spin_lock_init(&reg_pending_beacons_lock);
+       spin_lock_init(&reg_indoor_lock);
 
        reg_regdb_size_check();
 
index 4b45d6e61d24ad71ca2fe0074922ecb1e4a3a1b3..a2c4e16459da06a3b6ebad974aaab665eda717f0 100644 (file)
@@ -25,7 +25,20 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy);
 
 int regulatory_hint_user(const char *alpha2,
                         enum nl80211_user_reg_hint_type user_reg_hint_type);
-int regulatory_hint_indoor_user(void);
+
+/**
+ * regulatory_hint_indoor - hint operation in indoor env. or not
+ * @is_indoor: if true indicates that user space thinks that the
+ * device is operating in an indoor environment.
+ * @portid: the netlink port ID on which the hint was given.
+ */
+int regulatory_hint_indoor(bool is_indoor, u32 portid);
+
+/**
+ * regulatory_netlink_notify - notify on released netlink socket
+ * @portid: the netlink socket port ID
+ */
+void regulatory_netlink_notify(u32 portid);
 
 void wiphy_regulatory_register(struct wiphy *wiphy);
 void wiphy_regulatory_deregister(struct wiphy *wiphy);
index c705c3e2b7510dd818bf0599db0a0ec887a43c2f..3a50aa2553bfd777cce2657eee38f0c3a7b34081 100644 (file)
@@ -531,24 +531,78 @@ static int cmp_bss(struct cfg80211_bss *a,
        }
 }
 
+static bool cfg80211_bss_type_match(u16 capability,
+                                   enum ieee80211_band band,
+                                   enum ieee80211_bss_type bss_type)
+{
+       bool ret = true;
+       u16 mask, val;
+
+       if (bss_type == IEEE80211_BSS_TYPE_ANY)
+               return ret;
+
+       if (band == IEEE80211_BAND_60GHZ) {
+               mask = WLAN_CAPABILITY_DMG_TYPE_MASK;
+               switch (bss_type) {
+               case IEEE80211_BSS_TYPE_ESS:
+                       val = WLAN_CAPABILITY_DMG_TYPE_AP;
+                       break;
+               case IEEE80211_BSS_TYPE_PBSS:
+                       val = WLAN_CAPABILITY_DMG_TYPE_PBSS;
+                       break;
+               case IEEE80211_BSS_TYPE_IBSS:
+                       val = WLAN_CAPABILITY_DMG_TYPE_IBSS;
+                       break;
+               default:
+                       return false;
+               }
+       } else {
+               mask = WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS;
+               switch (bss_type) {
+               case IEEE80211_BSS_TYPE_ESS:
+                       val = WLAN_CAPABILITY_ESS;
+                       break;
+               case IEEE80211_BSS_TYPE_IBSS:
+                       val = WLAN_CAPABILITY_IBSS;
+                       break;
+               case IEEE80211_BSS_TYPE_MBSS:
+                       val = 0;
+                       break;
+               default:
+                       return false;
+               }
+       }
+
+       ret = ((capability & mask) == val);
+       return ret;
+}
+
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
                                      struct ieee80211_channel *channel,
                                      const u8 *bssid,
                                      const u8 *ssid, size_t ssid_len,
-                                     u16 capa_mask, u16 capa_val)
+                                     enum ieee80211_bss_type bss_type,
+                                     enum ieee80211_privacy privacy)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_internal_bss *bss, *res = NULL;
        unsigned long now = jiffies;
+       int bss_privacy;
 
-       trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, capa_mask,
-                              capa_val);
+       trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, bss_type,
+                              privacy);
 
        spin_lock_bh(&rdev->bss_lock);
 
        list_for_each_entry(bss, &rdev->bss_list, list) {
-               if ((bss->pub.capability & capa_mask) != capa_val)
+               if (!cfg80211_bss_type_match(bss->pub.capability,
+                                            bss->pub.channel->band, bss_type))
+                       continue;
+
+               bss_privacy = (bss->pub.capability & WLAN_CAPABILITY_PRIVACY);
+               if ((privacy == IEEE80211_PRIVACY_ON && !bss_privacy) ||
+                   (privacy == IEEE80211_PRIVACY_OFF && bss_privacy))
                        continue;
                if (channel && bss->pub.channel != channel)
                        continue;
@@ -896,6 +950,7 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
        struct cfg80211_bss_ies *ies;
        struct ieee80211_channel *channel;
        struct cfg80211_internal_bss tmp = {}, *res;
+       int bss_type;
        bool signal_valid;
 
        if (WARN_ON(!wiphy))
@@ -950,8 +1005,15 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
        if (!res)
                return NULL;
 
-       if (res->pub.capability & WLAN_CAPABILITY_ESS)
-               regulatory_hint_found_beacon(wiphy, channel, gfp);
+       if (channel->band == IEEE80211_BAND_60GHZ) {
+               bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
+               if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
+                   bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
+                       regulatory_hint_found_beacon(wiphy, channel, gfp);
+       } else {
+               if (res->pub.capability & WLAN_CAPABILITY_ESS)
+                       regulatory_hint_found_beacon(wiphy, channel, gfp);
+       }
 
        trace_cfg80211_return_bss(&res->pub);
        /* cfg80211_bss_update gives us a referenced result */
@@ -973,6 +1035,7 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
        bool signal_valid;
        size_t ielen = len - offsetof(struct ieee80211_mgmt,
                                      u.probe_resp.variable);
+       int bss_type;
 
        BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
                        offsetof(struct ieee80211_mgmt, u.beacon.variable));
@@ -1025,8 +1088,15 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
        if (!res)
                return NULL;
 
-       if (res->pub.capability & WLAN_CAPABILITY_ESS)
-               regulatory_hint_found_beacon(wiphy, channel, gfp);
+       if (channel->band == IEEE80211_BAND_60GHZ) {
+               bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
+               if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
+                   bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
+                       regulatory_hint_found_beacon(wiphy, channel, gfp);
+       } else {
+               if (res->pub.capability & WLAN_CAPABILITY_ESS)
+                       regulatory_hint_found_beacon(wiphy, channel, gfp);
+       }
 
        trace_cfg80211_return_bss(&res->pub);
        /* cfg80211_bss_update gives us a referenced result */
@@ -1237,17 +1307,17 @@ int cfg80211_wext_siwscan(struct net_device *dev,
        kfree(creq);
        return err;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan);
+EXPORT_WEXT_HANDLER(cfg80211_wext_siwscan);
 
-static void ieee80211_scan_add_ies(struct iw_request_info *info,
-                                  const struct cfg80211_bss_ies *ies,
-                                  char **current_ev, char *end_buf)
+static char *ieee80211_scan_add_ies(struct iw_request_info *info,
+                                   const struct cfg80211_bss_ies *ies,
+                                   char *current_ev, char *end_buf)
 {
        const u8 *pos, *end, *next;
        struct iw_event iwe;
 
        if (!ies)
-               return;
+               return current_ev;
 
        /*
         * If needed, fragment the IEs buffer (at IE boundaries) into short
@@ -1264,10 +1334,11 @@ static void ieee80211_scan_add_ies(struct iw_request_info *info,
                memset(&iwe, 0, sizeof(iwe));
                iwe.cmd = IWEVGENIE;
                iwe.u.data.length = next - pos;
-               *current_ev = iwe_stream_add_point(info, *current_ev,
-                                                  end_buf, &iwe,
-                                                  (void *)pos);
-
+               current_ev = iwe_stream_add_point_check(info, current_ev,
+                                                       end_buf, &iwe,
+                                                       (void *)pos);
+               if (IS_ERR(current_ev))
+                       return current_ev;
                pos = next;
        }
 
@@ -1275,10 +1346,14 @@ static void ieee80211_scan_add_ies(struct iw_request_info *info,
                memset(&iwe, 0, sizeof(iwe));
                iwe.cmd = IWEVGENIE;
                iwe.u.data.length = end - pos;
-               *current_ev = iwe_stream_add_point(info, *current_ev,
-                                                  end_buf, &iwe,
-                                                  (void *)pos);
+               current_ev = iwe_stream_add_point_check(info, current_ev,
+                                                       end_buf, &iwe,
+                                                       (void *)pos);
+               if (IS_ERR(current_ev))
+                       return current_ev;
        }
+
+       return current_ev;
 }
 
 static char *
@@ -1289,7 +1364,8 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
        const struct cfg80211_bss_ies *ies;
        struct iw_event iwe;
        const u8 *ie;
-       u8 *buf, *cfg, *p;
+       u8 buf[50];
+       u8 *cfg, *p, *tmp;
        int rem, i, sig;
        bool ismesh = false;
 
@@ -1297,22 +1373,28 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
        iwe.cmd = SIOCGIWAP;
        iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
        memcpy(iwe.u.ap_addr.sa_data, bss->pub.bssid, ETH_ALEN);
-       current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
-                                         IW_EV_ADDR_LEN);
+       current_ev = iwe_stream_add_event_check(info, current_ev, end_buf, &iwe,
+                                               IW_EV_ADDR_LEN);
+       if (IS_ERR(current_ev))
+               return current_ev;
 
        memset(&iwe, 0, sizeof(iwe));
        iwe.cmd = SIOCGIWFREQ;
        iwe.u.freq.m = ieee80211_frequency_to_channel(bss->pub.channel->center_freq);
        iwe.u.freq.e = 0;
-       current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
-                                         IW_EV_FREQ_LEN);
+       current_ev = iwe_stream_add_event_check(info, current_ev, end_buf, &iwe,
+                                               IW_EV_FREQ_LEN);
+       if (IS_ERR(current_ev))
+               return current_ev;
 
        memset(&iwe, 0, sizeof(iwe));
        iwe.cmd = SIOCGIWFREQ;
        iwe.u.freq.m = bss->pub.channel->center_freq;
        iwe.u.freq.e = 6;
-       current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
-                                         IW_EV_FREQ_LEN);
+       current_ev = iwe_stream_add_event_check(info, current_ev, end_buf, &iwe,
+                                               IW_EV_FREQ_LEN);
+       if (IS_ERR(current_ev))
+               return current_ev;
 
        if (wiphy->signal_type != CFG80211_SIGNAL_TYPE_NONE) {
                memset(&iwe, 0, sizeof(iwe));
@@ -1341,8 +1423,11 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
                        /* not reached */
                        break;
                }
-               current_ev = iwe_stream_add_event(info, current_ev, end_buf,
-                                                 &iwe, IW_EV_QUAL_LEN);
+               current_ev = iwe_stream_add_event_check(info, current_ev,
+                                                       end_buf, &iwe,
+                                                       IW_EV_QUAL_LEN);
+               if (IS_ERR(current_ev))
+                       return current_ev;
        }
 
        memset(&iwe, 0, sizeof(iwe));
@@ -1352,8 +1437,10 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
        else
                iwe.u.data.flags = IW_ENCODE_DISABLED;
        iwe.u.data.length = 0;
-       current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-                                         &iwe, "");
+       current_ev = iwe_stream_add_point_check(info, current_ev, end_buf,
+                                               &iwe, "");
+       if (IS_ERR(current_ev))
+               return current_ev;
 
        rcu_read_lock();
        ies = rcu_dereference(bss->pub.ies);
@@ -1371,66 +1458,91 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
                        iwe.cmd = SIOCGIWESSID;
                        iwe.u.data.length = ie[1];
                        iwe.u.data.flags = 1;
-                       current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-                                                         &iwe, (u8 *)ie + 2);
+                       current_ev = iwe_stream_add_point_check(info,
+                                                               current_ev,
+                                                               end_buf, &iwe,
+                                                               (u8 *)ie + 2);
+                       if (IS_ERR(current_ev))
+                               goto unlock;
                        break;
                case WLAN_EID_MESH_ID:
                        memset(&iwe, 0, sizeof(iwe));
                        iwe.cmd = SIOCGIWESSID;
                        iwe.u.data.length = ie[1];
                        iwe.u.data.flags = 1;
-                       current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-                                                         &iwe, (u8 *)ie + 2);
+                       current_ev = iwe_stream_add_point_check(info,
+                                                               current_ev,
+                                                               end_buf, &iwe,
+                                                               (u8 *)ie + 2);
+                       if (IS_ERR(current_ev))
+                               goto unlock;
                        break;
                case WLAN_EID_MESH_CONFIG:
                        ismesh = true;
                        if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
                                break;
-                       buf = kmalloc(50, GFP_ATOMIC);
-                       if (!buf)
-                               break;
                        cfg = (u8 *)ie + 2;
                        memset(&iwe, 0, sizeof(iwe));
                        iwe.cmd = IWEVCUSTOM;
                        sprintf(buf, "Mesh Network Path Selection Protocol ID: "
                                "0x%02X", cfg[0]);
                        iwe.u.data.length = strlen(buf);
-                       current_ev = iwe_stream_add_point(info, current_ev,
-                                                         end_buf,
-                                                         &iwe, buf);
+                       current_ev = iwe_stream_add_point_check(info,
+                                                               current_ev,
+                                                               end_buf,
+                                                               &iwe, buf);
+                       if (IS_ERR(current_ev))
+                               goto unlock;
                        sprintf(buf, "Path Selection Metric ID: 0x%02X",
                                cfg[1]);
                        iwe.u.data.length = strlen(buf);
-                       current_ev = iwe_stream_add_point(info, current_ev,
-                                                         end_buf,
-                                                         &iwe, buf);
+                       current_ev = iwe_stream_add_point_check(info,
+                                                               current_ev,
+                                                               end_buf,
+                                                               &iwe, buf);
+                       if (IS_ERR(current_ev))
+                               goto unlock;
                        sprintf(buf, "Congestion Control Mode ID: 0x%02X",
                                cfg[2]);
                        iwe.u.data.length = strlen(buf);
-                       current_ev = iwe_stream_add_point(info, current_ev,
-                                                         end_buf,
-                                                         &iwe, buf);
+                       current_ev = iwe_stream_add_point_check(info,
+                                                               current_ev,
+                                                               end_buf,
+                                                               &iwe, buf);
+                       if (IS_ERR(current_ev))
+                               goto unlock;
                        sprintf(buf, "Synchronization ID: 0x%02X", cfg[3]);
                        iwe.u.data.length = strlen(buf);
-                       current_ev = iwe_stream_add_point(info, current_ev,
-                                                         end_buf,
-                                                         &iwe, buf);
+                       current_ev = iwe_stream_add_point_check(info,
+                                                               current_ev,
+                                                               end_buf,
+                                                               &iwe, buf);
+                       if (IS_ERR(current_ev))
+                               goto unlock;
                        sprintf(buf, "Authentication ID: 0x%02X", cfg[4]);
                        iwe.u.data.length = strlen(buf);
-                       current_ev = iwe_stream_add_point(info, current_ev,
-                                                         end_buf,
-                                                         &iwe, buf);
+                       current_ev = iwe_stream_add_point_check(info,
+                                                               current_ev,
+                                                               end_buf,
+                                                               &iwe, buf);
+                       if (IS_ERR(current_ev))
+                               goto unlock;
                        sprintf(buf, "Formation Info: 0x%02X", cfg[5]);
                        iwe.u.data.length = strlen(buf);
-                       current_ev = iwe_stream_add_point(info, current_ev,
-                                                         end_buf,
-                                                         &iwe, buf);
+                       current_ev = iwe_stream_add_point_check(info,
+                                                               current_ev,
+                                                               end_buf,
+                                                               &iwe, buf);
+                       if (IS_ERR(current_ev))
+                               goto unlock;
                        sprintf(buf, "Capabilities: 0x%02X", cfg[6]);
                        iwe.u.data.length = strlen(buf);
-                       current_ev = iwe_stream_add_point(info, current_ev,
-                                                         end_buf,
-                                                         &iwe, buf);
-                       kfree(buf);
+                       current_ev = iwe_stream_add_point_check(info,
+                                                               current_ev,
+                                                               end_buf,
+                                                               &iwe, buf);
+                       if (IS_ERR(current_ev))
+                               goto unlock;
                        break;
                case WLAN_EID_SUPP_RATES:
                case WLAN_EID_EXT_SUPP_RATES:
@@ -1445,8 +1557,14 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
                        for (i = 0; i < ie[1]; i++) {
                                iwe.u.bitrate.value =
                                        ((ie[i + 2] & 0x7f) * 500000);
+                               tmp = p;
                                p = iwe_stream_add_value(info, current_ev, p,
-                                               end_buf, &iwe, IW_EV_PARAM_LEN);
+                                                        end_buf, &iwe,
+                                                        IW_EV_PARAM_LEN);
+                               if (p == tmp) {
+                                       current_ev = ERR_PTR(-E2BIG);
+                                       goto unlock;
+                               }
                        }
                        current_ev = p;
                        break;
@@ -1465,31 +1583,35 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
                        iwe.u.mode = IW_MODE_MASTER;
                else
                        iwe.u.mode = IW_MODE_ADHOC;
-               current_ev = iwe_stream_add_event(info, current_ev, end_buf,
-                                                 &iwe, IW_EV_UINT_LEN);
-       }
-
-       buf = kmalloc(31, GFP_ATOMIC);
-       if (buf) {
-               memset(&iwe, 0, sizeof(iwe));
-               iwe.cmd = IWEVCUSTOM;
-               sprintf(buf, "tsf=%016llx", (unsigned long long)(ies->tsf));
-               iwe.u.data.length = strlen(buf);
-               current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-                                                 &iwe, buf);
-               memset(&iwe, 0, sizeof(iwe));
-               iwe.cmd = IWEVCUSTOM;
-               sprintf(buf, " Last beacon: %ums ago",
-                       elapsed_jiffies_msecs(bss->ts));
-               iwe.u.data.length = strlen(buf);
-               current_ev = iwe_stream_add_point(info, current_ev,
-                                                 end_buf, &iwe, buf);
-               kfree(buf);
+               current_ev = iwe_stream_add_event_check(info, current_ev,
+                                                       end_buf, &iwe,
+                                                       IW_EV_UINT_LEN);
+               if (IS_ERR(current_ev))
+                       goto unlock;
        }
 
-       ieee80211_scan_add_ies(info, ies, &current_ev, end_buf);
+       memset(&iwe, 0, sizeof(iwe));
+       iwe.cmd = IWEVCUSTOM;
+       sprintf(buf, "tsf=%016llx", (unsigned long long)(ies->tsf));
+       iwe.u.data.length = strlen(buf);
+       current_ev = iwe_stream_add_point_check(info, current_ev, end_buf,
+                                               &iwe, buf);
+       if (IS_ERR(current_ev))
+               goto unlock;
+       memset(&iwe, 0, sizeof(iwe));
+       iwe.cmd = IWEVCUSTOM;
+       sprintf(buf, " Last beacon: %ums ago",
+               elapsed_jiffies_msecs(bss->ts));
+       iwe.u.data.length = strlen(buf);
+       current_ev = iwe_stream_add_point_check(info, current_ev,
+                                               end_buf, &iwe, buf);
+       if (IS_ERR(current_ev))
+               goto unlock;
+
+       current_ev = ieee80211_scan_add_ies(info, ies, current_ev, end_buf);
+
+ unlock:
        rcu_read_unlock();
-
        return current_ev;
 }
 
@@ -1501,19 +1623,27 @@ static int ieee80211_scan_results(struct cfg80211_registered_device *rdev,
        char *current_ev = buf;
        char *end_buf = buf + len;
        struct cfg80211_internal_bss *bss;
+       int err = 0;
 
        spin_lock_bh(&rdev->bss_lock);
        cfg80211_bss_expire(rdev);
 
        list_for_each_entry(bss, &rdev->bss_list, list) {
                if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
-                       spin_unlock_bh(&rdev->bss_lock);
-                       return -E2BIG;
+                       err = -E2BIG;
+                       break;
                }
                current_ev = ieee80211_bss(&rdev->wiphy, info, bss,
                                           current_ev, end_buf);
+               if (IS_ERR(current_ev)) {
+                       err = PTR_ERR(current_ev);
+                       break;
+               }
        }
        spin_unlock_bh(&rdev->bss_lock);
+
+       if (err)
+               return err;
        return current_ev - buf;
 }
 
@@ -1545,5 +1675,5 @@ int cfg80211_wext_giwscan(struct net_device *dev,
 
        return res;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwscan);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwscan);
 #endif
index 0ab3711c79a01ae98cdaaafa64bf12ac94e80271..ea1da6621ff051028970f50154af56d6f2e01b46 100644 (file)
@@ -257,19 +257,15 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_bss *bss;
-       u16 capa = WLAN_CAPABILITY_ESS;
 
        ASSERT_WDEV_LOCK(wdev);
 
-       if (wdev->conn->params.privacy)
-               capa |= WLAN_CAPABILITY_PRIVACY;
-
        bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
                               wdev->conn->params.bssid,
                               wdev->conn->params.ssid,
                               wdev->conn->params.ssid_len,
-                              WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
-                              capa);
+                              IEEE80211_BSS_TYPE_ESS,
+                              IEEE80211_PRIVACY(wdev->conn->params.privacy));
        if (!bss)
                return NULL;
 
@@ -637,8 +633,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
                bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
                                       wdev->ssid, wdev->ssid_len,
-                                      WLAN_CAPABILITY_ESS,
-                                      WLAN_CAPABILITY_ESS);
+                                      IEEE80211_BSS_TYPE_ESS,
+                                      IEEE80211_PRIVACY_ANY);
                if (bss)
                        cfg80211_hold_bss(bss_from_pub(bss));
        }
@@ -795,8 +791,8 @@ void cfg80211_roamed(struct net_device *dev,
        struct cfg80211_bss *bss;
 
        bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
-                              wdev->ssid_len, WLAN_CAPABILITY_ESS,
-                              WLAN_CAPABILITY_ESS);
+                              wdev->ssid_len,
+                              IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
        if (WARN_ON(!bss))
                return;
 
index b17b3692f8c239d918a072d2fab9031eb91073b4..af3617c9879e33f8be5c325ffdc3b5558e60b217 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/tracepoint.h>
 
 #include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
 #include <net/cfg80211.h>
 #include "core.h"
 
@@ -15,7 +16,7 @@
        if (given_mac)                                               \
                memcpy(__entry->entry_mac, given_mac, ETH_ALEN);     \
        else                                                         \
-               memset(__entry->entry_mac, 0, ETH_ALEN);             \
+               eth_zero_addr(__entry->entry_mac);                   \
        } while (0)
 #define MAC_PR_FMT "%pM"
 #define MAC_PR_ARG(entry_mac) (__entry->entry_mac)
@@ -627,6 +628,7 @@ DECLARE_EVENT_CLASS(station_add_change,
                __field(u8, plink_state)
                __field(u8, uapsd_queues)
                __array(u8, ht_capa, (int)sizeof(struct ieee80211_ht_cap))
+               __array(char, vlan, IFNAMSIZ)
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
@@ -644,16 +646,19 @@ DECLARE_EVENT_CLASS(station_add_change,
                if (params->ht_capa)
                        memcpy(__entry->ht_capa, params->ht_capa,
                               sizeof(struct ieee80211_ht_cap));
+               memset(__entry->vlan, 0, sizeof(__entry->vlan));
+               if (params->vlan)
+                       memcpy(__entry->vlan, params->vlan->name, IFNAMSIZ);
        ),
        TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT
                  ", station flags mask: %u, station flags set: %u, "
                  "station modify mask: %u, listen interval: %d, aid: %u, "
-                 "plink action: %u, plink state: %u, uapsd queues: %u",
+                 "plink action: %u, plink state: %u, uapsd queues: %u, vlan:%s",
                  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac),
                  __entry->sta_flags_mask, __entry->sta_flags_set,
                  __entry->sta_modify_mask, __entry->listen_interval,
                  __entry->aid, __entry->plink_action, __entry->plink_state,
-                 __entry->uapsd_queues)
+                 __entry->uapsd_queues, __entry->vlan)
 );
 
 DEFINE_EVENT(station_add_change, rdev_add_station,
@@ -1077,7 +1082,7 @@ TRACE_EVENT(rdev_auth,
                if (req->bss)
                        MAC_ASSIGN(bssid, req->bss->bssid);
                else
-                       memset(__entry->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(__entry->bssid);
                __entry->auth_type = req->auth_type;
        ),
        TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", auth type: %d, bssid: " MAC_PR_FMT,
@@ -1103,7 +1108,7 @@ TRACE_EVENT(rdev_assoc,
                if (req->bss)
                        MAC_ASSIGN(bssid, req->bss->bssid);
                else
-                       memset(__entry->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(__entry->bssid);
                MAC_ASSIGN(prev_bssid, req->prev_bssid);
                __entry->use_mfp = req->use_mfp;
                __entry->flags = req->flags;
@@ -1153,7 +1158,7 @@ TRACE_EVENT(rdev_disassoc,
                if (req->bss)
                        MAC_ASSIGN(bssid, req->bss->bssid);
                else
-                       memset(__entry->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(__entry->bssid);
                __entry->reason_code = req->reason_code;
                __entry->local_state_change = req->local_state_change;
        ),
@@ -2636,28 +2641,30 @@ DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_stopped,
 TRACE_EVENT(cfg80211_get_bss,
        TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
                 const u8 *bssid, const u8 *ssid, size_t ssid_len,
-                u16 capa_mask, u16 capa_val),
-       TP_ARGS(wiphy, channel, bssid, ssid, ssid_len, capa_mask, capa_val),
+                enum ieee80211_bss_type bss_type,
+                enum ieee80211_privacy privacy),
+       TP_ARGS(wiphy, channel, bssid, ssid, ssid_len, bss_type, privacy),
        TP_STRUCT__entry(
                WIPHY_ENTRY
                CHAN_ENTRY
                MAC_ENTRY(bssid)
                __dynamic_array(u8, ssid, ssid_len)
-               __field(u16, capa_mask)
-               __field(u16, capa_val)
+               __field(enum ieee80211_bss_type, bss_type)
+               __field(enum ieee80211_privacy, privacy)
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
                CHAN_ASSIGN(channel);
                MAC_ASSIGN(bssid, bssid);
                memcpy(__get_dynamic_array(ssid), ssid, ssid_len);
-               __entry->capa_mask = capa_mask;
-               __entry->capa_val = capa_val;
-       ),
-       TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", " MAC_PR_FMT ", buf: %#.2x, "
-                 "capa_mask: %d, capa_val: %u", WIPHY_PR_ARG, CHAN_PR_ARG,
-                 MAC_PR_ARG(bssid), ((u8 *)__get_dynamic_array(ssid))[0],
-                 __entry->capa_mask, __entry->capa_val)
+               __entry->bss_type = bss_type;
+               __entry->privacy = privacy;
+       ),
+       TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", " MAC_PR_FMT
+                 ", buf: %#.2x, bss_type: %d, privacy: %d",
+                 WIPHY_PR_ARG, CHAN_PR_ARG, MAC_PR_ARG(bssid),
+                 ((u8 *)__get_dynamic_array(ssid))[0], __entry->bss_type,
+                 __entry->privacy)
 );
 
 TRACE_EVENT(cfg80211_inform_bss_width_frame,
index 6903dbdcb8c1f03bcef684ad1074e3dea9f18e18..f218b151530a915c106376bde1643700750e5007 100644 (file)
@@ -1296,6 +1296,7 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
        switch (operating_class) {
        case 112:
        case 115 ... 127:
+       case 128 ... 130:
                *band = IEEE80211_BAND_5GHZ;
                return true;
        case 81:
@@ -1313,6 +1314,135 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
 }
 EXPORT_SYMBOL(ieee80211_operating_class_to_band);
 
+bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
+                                         u8 *op_class)
+{
+       u8 vht_opclass;
+       u16 freq = chandef->center_freq1;
+
+       if (freq >= 2412 && freq <= 2472) {
+               if (chandef->width > NL80211_CHAN_WIDTH_40)
+                       return false;
+
+               /* 2.407 GHz, channels 1..13 */
+               if (chandef->width == NL80211_CHAN_WIDTH_40) {
+                       if (freq > chandef->chan->center_freq)
+                               *op_class = 83; /* HT40+ */
+                       else
+                               *op_class = 84; /* HT40- */
+               } else {
+                       *op_class = 81;
+               }
+
+               return true;
+       }
+
+       if (freq == 2484) {
+               if (chandef->width > NL80211_CHAN_WIDTH_40)
+                       return false;
+
+               *op_class = 82; /* channel 14 */
+               return true;
+       }
+
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_80:
+               vht_opclass = 128;
+               break;
+       case NL80211_CHAN_WIDTH_160:
+               vht_opclass = 129;
+               break;
+       case NL80211_CHAN_WIDTH_80P80:
+               vht_opclass = 130;
+               break;
+       case NL80211_CHAN_WIDTH_10:
+       case NL80211_CHAN_WIDTH_5:
+               return false; /* unsupported for now */
+       default:
+               vht_opclass = 0;
+               break;
+       }
+
+       /* 5 GHz, channels 36..48 */
+       if (freq >= 5180 && freq <= 5240) {
+               if (vht_opclass) {
+                       *op_class = vht_opclass;
+               } else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+                       if (freq > chandef->chan->center_freq)
+                               *op_class = 116;
+                       else
+                               *op_class = 117;
+               } else {
+                       *op_class = 115;
+               }
+
+               return true;
+       }
+
+       /* 5 GHz, channels 52..64 */
+       if (freq >= 5260 && freq <= 5320) {
+               if (vht_opclass) {
+                       *op_class = vht_opclass;
+               } else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+                       if (freq > chandef->chan->center_freq)
+                               *op_class = 119;
+                       else
+                               *op_class = 120;
+               } else {
+                       *op_class = 118;
+               }
+
+               return true;
+       }
+
+       /* 5 GHz, channels 100..144 */
+       if (freq >= 5500 && freq <= 5720) {
+               if (vht_opclass) {
+                       *op_class = vht_opclass;
+               } else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+                       if (freq > chandef->chan->center_freq)
+                               *op_class = 122;
+                       else
+                               *op_class = 123;
+               } else {
+                       *op_class = 121;
+               }
+
+               return true;
+       }
+
+       /* 5 GHz, channels 149..169 */
+       if (freq >= 5745 && freq <= 5845) {
+               if (vht_opclass) {
+                       *op_class = vht_opclass;
+               } else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+                       if (freq > chandef->chan->center_freq)
+                               *op_class = 126;
+                       else
+                               *op_class = 127;
+               } else if (freq <= 5805) {
+                       *op_class = 124;
+               } else {
+                       *op_class = 125;
+               }
+
+               return true;
+       }
+
+       /* 56.16 GHz, channel 1..4 */
+       if (freq >= 56160 + 2160 * 1 && freq <= 56160 + 2160 * 4) {
+               if (chandef->width >= NL80211_CHAN_WIDTH_40)
+                       return false;
+
+               *op_class = 180;
+               return true;
+       }
+
+       /* not supported yet */
+       return false;
+}
+EXPORT_SYMBOL(ieee80211_chandef_to_operating_class);
+
 int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
                                 u32 beacon_int)
 {
index 5b24d39d7903cf40fb0ac53c6bc9ba739bd2d6b5..fff1bef6ed6d916f9019a63d708652f4ab07cddf 100644 (file)
@@ -63,7 +63,7 @@ int cfg80211_wext_giwname(struct net_device *dev,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwname);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwname);
 
 int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
                          u32 *mode, char *extra)
@@ -99,7 +99,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
 
        return cfg80211_change_iface(rdev, dev, type, NULL, &vifparams);
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwmode);
+EXPORT_WEXT_HANDLER(cfg80211_wext_siwmode);
 
 int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
                          u32 *mode, char *extra)
@@ -134,7 +134,7 @@ int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
        }
        return 0;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwmode);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwmode);
 
 
 int cfg80211_wext_giwrange(struct net_device *dev,
@@ -248,7 +248,7 @@ int cfg80211_wext_giwrange(struct net_device *dev,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwrange);
 
 
 /**
@@ -303,7 +303,7 @@ int cfg80211_wext_siwrts(struct net_device *dev,
 
        return err;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwrts);
+EXPORT_WEXT_HANDLER(cfg80211_wext_siwrts);
 
 int cfg80211_wext_giwrts(struct net_device *dev,
                         struct iw_request_info *info,
@@ -317,7 +317,7 @@ int cfg80211_wext_giwrts(struct net_device *dev,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwrts);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwrts);
 
 int cfg80211_wext_siwfrag(struct net_device *dev,
                          struct iw_request_info *info,
@@ -343,7 +343,7 @@ int cfg80211_wext_siwfrag(struct net_device *dev,
 
        return err;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwfrag);
+EXPORT_WEXT_HANDLER(cfg80211_wext_siwfrag);
 
 int cfg80211_wext_giwfrag(struct net_device *dev,
                          struct iw_request_info *info,
@@ -357,7 +357,7 @@ int cfg80211_wext_giwfrag(struct net_device *dev,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwfrag);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwfrag);
 
 static int cfg80211_wext_siwretry(struct net_device *dev,
                                  struct iw_request_info *info,
@@ -427,7 +427,7 @@ int cfg80211_wext_giwretry(struct net_device *dev,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwretry);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwretry);
 
 static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
                                     struct net_device *dev, bool pairwise,
index ebcacca2f731941123efb22c8067a6c34aaa9ea1..94c7405a541347c4a0130cf45b7770ecff87e829 100644 (file)
@@ -4,6 +4,12 @@
 #include <net/iw_handler.h>
 #include <linux/wireless.h>
 
+#ifdef CONFIG_CFG80211_WEXT_EXPORT
+#define EXPORT_WEXT_HANDLER(h) EXPORT_SYMBOL_GPL(h)
+#else
+#define EXPORT_WEXT_HANDLER(h)
+#endif /* CONFIG_CFG80211_WEXT_EXPORT */
+
 int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
                               struct iw_request_info *info,
                               struct iw_freq *freq, char *extra);
index 368611c0573997e45f433c68a0a2c0e9de17270b..a4e8af3321d2ba5e07cd316b3ea8b65d1aecb204 100644 (file)
@@ -322,7 +322,7 @@ int cfg80211_mgd_wext_giwap(struct net_device *dev,
        if (wdev->current_bss)
                memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN);
        else
-               memset(ap_addr->sa_data, 0, ETH_ALEN);
+               eth_zero_addr(ap_addr->sa_data);
        wdev_unlock(wdev);
 
        return 0;
index d9149b68b9bc5c1d100d654d256048b30e268cdb..c3ab230e4493fbb0d63b50c3befe1d15aeda802e 100644 (file)
@@ -1077,8 +1077,7 @@ out_clear_request:
        goto out;
 }
 
-static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
-                      struct msghdr *msg, size_t len)
+static int x25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
        struct x25_sock *x25 = x25_sk(sk);
@@ -1252,8 +1251,7 @@ out_kfree_skb:
 }
 
 
-static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
-                      struct msghdr *msg, size_t size,
+static int x25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                       int flags)
 {
        struct sock *sk = sock->sk;
index de971b6d38c58310f744988bb970ccb5438ef39b..f5e39e35d73aa96c3551b0e46f9b26ab291d23aa 100644 (file)
@@ -1043,12 +1043,12 @@ static struct xfrm_state *__find_acq_core(struct net *net,
                        break;
 
                case AF_INET6:
-                       *(struct in6_addr *)x->sel.daddr.a6 = *(struct in6_addr *)daddr;
-                       *(struct in6_addr *)x->sel.saddr.a6 = *(struct in6_addr *)saddr;
+                       x->sel.daddr.in6 = daddr->in6;
+                       x->sel.saddr.in6 = saddr->in6;
                        x->sel.prefixlen_d = 128;
                        x->sel.prefixlen_s = 128;
-                       *(struct in6_addr *)x->props.saddr.a6 = *(struct in6_addr *)saddr;
-                       *(struct in6_addr *)x->id.daddr.a6 = *(struct in6_addr *)daddr;
+                       x->props.saddr.in6 = saddr->in6;
+                       x->id.daddr.in6 = daddr->in6;
                        break;
                }
 
index b5b3600dcdf5d004e01c1a558011519adfc386ba..d24f51bca465798cfcff96307eaf4aa101ad5833 100644 (file)
@@ -17,6 +17,7 @@ sockex2-objs := bpf_load.o libbpf.o sockex2_user.o
 always := $(hostprogs-y)
 always += sockex1_kern.o
 always += sockex2_kern.o
+always += tcbpf1_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 
index ca0333146006af20352ec704d5049085db975206..72540ec1f003accf249b33e669b9c31a00c364e8 100644 (file)
@@ -37,4 +37,11 @@ struct bpf_map_def {
        unsigned int max_entries;
 };
 
+static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
+       (void *) BPF_FUNC_skb_store_bytes;
+static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
+       (void *) BPF_FUNC_l3_csum_replace;
+static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
+       (void *) BPF_FUNC_l4_csum_replace;
+
 #endif
index 58c5fe1bdba1f08c278a750e69a7a32f706f819c..a6bb7e9c22c39b046dc97be982ab17b9d983b1b1 100644 (file)
@@ -92,7 +92,9 @@ extern char bpf_log_buf[LOG_BUF_SIZE];
                .off   = 0,                                     \
                .imm   = ((__u64) (IMM)) >> 32 })
 
-#define BPF_PSEUDO_MAP_FD      1
+#ifndef BPF_PSEUDO_MAP_FD
+# define BPF_PSEUDO_MAP_FD     1
+#endif
 
 /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
 #define BPF_LD_MAP_FD(DST, MAP_FD)                             \
index 0668926629154836b09dc81b9e4cf78cfb4eb343..ed18e9a4909c77899d6561680261e97a3dbea235 100644 (file)
@@ -1,5 +1,6 @@
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
 #include <uapi/linux/ip.h>
 #include "bpf_helpers.h"
 
@@ -11,14 +12,17 @@ struct bpf_map_def SEC("maps") my_map = {
 };
 
 SEC("socket1")
-int bpf_prog1(struct sk_buff *skb)
+int bpf_prog1(struct __sk_buff *skb)
 {
        int index = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
        long *value;
 
+       if (skb->pkt_type != PACKET_OUTGOING)
+               return 0;
+
        value = bpf_map_lookup_elem(&my_map, &index);
        if (value)
-               __sync_fetch_and_add(value, 1);
+               __sync_fetch_and_add(value, skb->len);
 
        return 0;
 }
index 34a443ff383121719f6f919694d5023fa36e1742..678ce469355152650ee055136821f4ad8b50ed85 100644 (file)
@@ -40,7 +40,7 @@ int main(int ac, char **argv)
                key = IPPROTO_ICMP;
                assert(bpf_lookup_elem(map_fd[0], &key, &icmp_cnt) == 0);
 
-               printf("TCP %lld UDP %lld ICMP %lld packets\n",
+               printf("TCP %lld UDP %lld ICMP %lld bytes\n",
                       tcp_cnt, udp_cnt, icmp_cnt);
                sleep(1);
        }
index 6f0135f0f2176c8a1825c884c4a28a13c7c1f123..ba0e177ff56151184a0685dc475eb2476d2d4126 100644 (file)
@@ -42,13 +42,13 @@ static inline int proto_ports_offset(__u64 proto)
        }
 }
 
-static inline int ip_is_fragment(struct sk_buff *ctx, __u64 nhoff)
+static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
 {
        return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
                & (IP_MF | IP_OFFSET);
 }
 
-static inline __u32 ipv6_addr_hash(struct sk_buff *ctx, __u64 off)
+static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
 {
        __u64 w0 = load_word(ctx, off);
        __u64 w1 = load_word(ctx, off + 4);
@@ -58,7 +58,7 @@ static inline __u32 ipv6_addr_hash(struct sk_buff *ctx, __u64 off)
        return (__u32)(w0 ^ w1 ^ w2 ^ w3);
 }
 
-static inline __u64 parse_ip(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
+static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
                             struct flow_keys *flow)
 {
        __u64 verlen;
@@ -82,7 +82,7 @@ static inline __u64 parse_ip(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
        return nhoff;
 }
 
-static inline __u64 parse_ipv6(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
+static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
                               struct flow_keys *flow)
 {
        *ip_proto = load_byte(skb,
@@ -96,7 +96,7 @@ static inline __u64 parse_ipv6(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto
        return nhoff;
 }
 
-static inline bool flow_dissector(struct sk_buff *skb, struct flow_keys *flow)
+static inline bool flow_dissector(struct __sk_buff *skb, struct flow_keys *flow)
 {
        __u64 nhoff = ETH_HLEN;
        __u64 ip_proto;
@@ -183,18 +183,23 @@ static inline bool flow_dissector(struct sk_buff *skb, struct flow_keys *flow)
        return true;
 }
 
+struct pair {
+       long packets;
+       long bytes;
+};
+
 struct bpf_map_def SEC("maps") hash_map = {
        .type = BPF_MAP_TYPE_HASH,
        .key_size = sizeof(__be32),
-       .value_size = sizeof(long),
+       .value_size = sizeof(struct pair),
        .max_entries = 1024,
 };
 
 SEC("socket2")
-int bpf_prog2(struct sk_buff *skb)
+int bpf_prog2(struct __sk_buff *skb)
 {
        struct flow_keys flow;
-       long *value;
+       struct pair *value;
        u32 key;
 
        if (!flow_dissector(skb, &flow))
@@ -203,9 +208,10 @@ int bpf_prog2(struct sk_buff *skb)
        key = flow.dst;
        value = bpf_map_lookup_elem(&hash_map, &key);
        if (value) {
-               __sync_fetch_and_add(value, 1);
+               __sync_fetch_and_add(&value->packets, 1);
+               __sync_fetch_and_add(&value->bytes, skb->len);
        } else {
-               long val = 1;
+               struct pair val = {1, skb->len};
 
                bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
        }
index d2d5f5a790d3c4952ff027b326add6a29e048157..29a276d766fc91fb0c0552ed5ebf0ef51146775b 100644 (file)
@@ -6,6 +6,11 @@
 #include <unistd.h>
 #include <arpa/inet.h>
 
+struct pair {
+       __u64 packets;
+       __u64 bytes;
+};
+
 int main(int ac, char **argv)
 {
        char filename[256];
@@ -29,13 +34,13 @@ int main(int ac, char **argv)
 
        for (i = 0; i < 5; i++) {
                int key = 0, next_key;
-               long long value;
+               struct pair value;
 
                while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
                        bpf_lookup_elem(map_fd[0], &next_key, &value);
-                       printf("ip %s count %lld\n",
+                       printf("ip %s bytes %lld packets %lld\n",
                               inet_ntoa((struct in_addr){htonl(next_key)}),
-                              value);
+                              value.bytes, value.packets);
                        key = next_key;
                }
                sleep(1);
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
new file mode 100644 (file)
index 0000000..7cf3f42
--- /dev/null
@@ -0,0 +1,71 @@
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/tcp.h>
+#include "bpf_helpers.h"
+
+/* compiler workaround */
+#define _htonl __builtin_bswap32
+
+static inline void set_dst_mac(struct __sk_buff *skb, char *mac)
+{
+       bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1);
+}
+
+/* use 1 below for ingress qdisc and 0 for egress */
+#if 0
+#undef ETH_HLEN
+#define ETH_HLEN 0
+#endif
+
+#define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check))
+#define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos))
+
+static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
+{
+       __u8 old_tos = load_byte(skb, TOS_OFF);
+
+       bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
+       bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
+}
+
+#define TCP_CSUM_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, check))
+#define IP_SRC_OFF (ETH_HLEN + offsetof(struct iphdr, saddr))
+
+#define IS_PSEUDO 0x10
+
+static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
+{
+       __u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF));
+
+       bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
+       bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
+       bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0);
+}
+
+#define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest))
+static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
+{
+       __u16 old_port = htons(load_half(skb, TCP_DPORT_OFF));
+
+       bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port));
+       bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0);
+}
+
+SEC("classifier")
+int bpf_prog1(struct __sk_buff *skb)
+{
+       __u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
+       long *value;
+
+       if (proto == IPPROTO_TCP) {
+               set_ip_tos(skb, 8);
+               set_tcp_ip_src(skb, 0xA010101);
+               set_tcp_dest_port(skb, 5001);
+       }
+
+       return 0;
+}
+char _license[] SEC("license") = "GPL";
index b96175e903639a2fe2c9b1d4777df0aaa63d6e6e..75d561f9fd6ae430b49248214608c00e588dd594 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/unistd.h>
 #include <string.h>
 #include <linux/filter.h>
+#include <stddef.h>
 #include "libbpf.h"
 
 #define MAX_INSNS 512
@@ -288,7 +289,8 @@ static struct bpf_test tests[] = {
                        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
 
                        /* should be able to access R0 = *(R2 + 8) */
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8),
+                       /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
                        BPF_EXIT_INSN(),
                },
                .result = ACCEPT,
@@ -641,6 +643,84 @@ static struct bpf_test tests[] = {
                },
                .result = ACCEPT,
        },
+       {
+               "access skb fields ok",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, pkt_type)),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, queue_mapping)),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, protocol)),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, vlan_present)),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, vlan_tci)),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "access skb fields bad1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
+       {
+               "access skb fields bad2",
+               .insns = {
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, pkt_type)),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup = {4},
+               .errstr = "different pointers",
+               .result = REJECT,
+       },
+       {
+               "access skb fields bad3",
+               .insns = {
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, pkt_type)),
+                       BPF_EXIT_INSN(),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -12),
+               },
+               .fixup = {6},
+               .errstr = "different pointers",
+               .result = REJECT,
+       },
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
@@ -687,7 +767,7 @@ static int test(void)
                }
                printf("#%d %s ", i, tests[i].descr);
 
-               prog_fd = bpf_prog_load(BPF_PROG_TYPE_UNSPEC, prog,
+               prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog,
                                        prog_len * sizeof(struct bpf_insn),
                                        "GPL");
 
diff --git a/samples/pktgen/pktgen.conf-1-1 b/samples/pktgen/pktgen.conf-1-1
new file mode 100755 (executable)
index 0000000..f91daad
--- /dev/null
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+    local result
+
+    echo $1 > $PGDEV
+
+    result=`cat $PGDEV | fgrep "Result: OK:"`
+    if [ "$result" = "" ]; then
+         cat $PGDEV | fgrep Result:
+    fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. One CPU example. We add eth1.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+  echo "Removing all devices"
+ pgset "rem_device_all"
+  echo "Adding eth1"
+ pgset "add_device eth1"
+
+
+# device config
+# delay 0 means maximum speed.
+
+CLONE_SKB="clone_skb 1000000"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+  echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 10.10.11.2"
+ pgset "dst_mac  00:04:23:08:91:dc"
+
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-1-flows b/samples/pktgen/pktgen.conf-1-1-flows
new file mode 100755 (executable)
index 0000000..081749c
--- /dev/null
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+    local result
+
+    echo $1 > $PGDEV
+
+    result=`cat $PGDEV | fgrep "Result: OK:"`
+    if [ "$result" = "" ]; then
+         cat $PGDEV | fgrep Result:
+    fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. One CPU example. We add eth1.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+  echo "Removing all devices"
+ pgset "rem_device_all"
+  echo "Adding eth1"
+ pgset "add_device eth1"
+
+
+# device config
+# delay 0
+# We need to do alloc for every skb since we cannot clone here.
+
+CLONE_SKB="clone_skb 0"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+  echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ # Random address with in the min-max range
+ pgset "flag IPDST_RND"
+ pgset "dst_min 10.0.0.0"
+ pgset "dst_max 10.255.255.255"
+
+ # 8k Concurrent flows at 4 pkts
+ pgset "flows 8192"
+ pgset "flowlen 4"
+
+ pgset "dst_mac  00:04:23:08:91:dc"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-1-ip6 b/samples/pktgen/pktgen.conf-1-1-ip6
new file mode 100755 (executable)
index 0000000..0b9ffd4
--- /dev/null
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+    local result
+
+    echo $1 > $PGDEV
+
+    result=`cat $PGDEV | fgrep "Result: OK:"`
+    if [ "$result" = "" ]; then
+         cat $PGDEV | fgrep Result:
+    fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. One CPU example. We add eth1.
+# IPv6. Note increase in minimal packet length
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+  echo "Removing all devices"
+ pgset "rem_device_all"
+  echo "Adding eth1"
+ pgset "add_device eth1"
+
+
+# device config
+# delay 0
+
+CLONE_SKB="clone_skb 1000000"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 66"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+  echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst6 fec0::1"
+ pgset "src6 fec0::2"
+ pgset "dst_mac  00:04:23:08:91:dc"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-1-ip6-rdos b/samples/pktgen/pktgen.conf-1-1-ip6-rdos
new file mode 100755 (executable)
index 0000000..ad98e5f
--- /dev/null
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+    local result
+
+    echo $1 > $PGDEV
+
+    result=`cat $PGDEV | fgrep "Result: OK:"`
+    if [ "$result" = "" ]; then
+         cat $PGDEV | fgrep Result:
+    fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. One CPU example. We add eth1.
+# IPv6. Note increase in minimal packet length
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+  echo "Removing all devices"
+ pgset "rem_device_all"
+  echo "Adding eth1"
+ pgset "add_device eth1"
+
+
+# device config
+# delay 0 means maximum speed.
+
+# We need to do alloc for every skb since we cannot clone here.
+CLONE_SKB="clone_skb 0"
+
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 66"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+  echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst6_min fec0::1"
+ pgset "dst6_max fec0::FFFF:FFFF"
+
+ pgset "dst_mac  00:04:23:08:91:dc"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-1-rdos b/samples/pktgen/pktgen.conf-1-1-rdos
new file mode 100755 (executable)
index 0000000..c7553be
--- /dev/null
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+    local result
+
+    echo $1 > $PGDEV
+
+    result=`cat $PGDEV | fgrep "Result: OK:"`
+    if [ "$result" = "" ]; then
+         cat $PGDEV | fgrep Result:
+    fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. One CPU example. We add eth1.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+  echo "Removing all devices"
+ pgset "rem_device_all"
+  echo "Adding eth1"
+ pgset "add_device eth1"
+
+
+# device config
+# delay 0
+
+# We need to do alloc for every skb since we cannot clone here.
+
+CLONE_SKB="clone_skb 0"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+  echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ # Random address with in the min-max range
+ pgset "flag IPDST_RND"
+ pgset "dst_min 10.0.0.0"
+ pgset "dst_max 10.255.255.255"
+
+ pgset "dst_mac  00:04:23:08:91:dc"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-2 b/samples/pktgen/pktgen.conf-1-2
new file mode 100755 (executable)
index 0000000..ba4eb26
--- /dev/null
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+    local result
+
+    echo $1 > $PGDEV
+
+    result=`cat $PGDEV | fgrep "Result: OK:"`
+    if [ "$result" = "" ]; then
+         cat $PGDEV | fgrep Result:
+    fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# One CPU means one thread. One CPU example. We add eth1, eth2 respectivly.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+  echo "Removing all devices"
+ pgset "rem_device_all"
+  echo "Adding eth1"
+ pgset "add_device eth1"
+  echo "Adding eth2"
+ pgset "add_device eth2"
+
+
+# device config
+# delay 0 means maximum speed.
+
+CLONE_SKB="clone_skb 1000000"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+  echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 10.10.11.2"
+ pgset "dst_mac  00:04:23:08:91:dc"
+
+PGDEV=/proc/net/pktgen/eth2
+  echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 192.168.2.2"
+ pgset "dst_mac  00:04:23:08:91:de"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1 /proc/net/pktgen/eth2
diff --git a/samples/pktgen/pktgen.conf-2-1 b/samples/pktgen/pktgen.conf-2-1
new file mode 100755 (executable)
index 0000000..e108e97
--- /dev/null
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+    local result
+
+    echo $1 > $PGDEV
+
+    result=`cat $PGDEV | fgrep "Result: OK:"`
+    if [ "$result" = "" ]; then
+         cat $PGDEV | fgrep Result:
+    fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. Two CPU example. We add eth1 to the first
+# and leave the second idle.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+  echo "Removing all devices"
+ pgset "rem_device_all"
+  echo "Adding eth1"
+ pgset "add_device eth1"
+
+# We need to remove old config since we dont use this thread. We can only
+# one NIC on one CPU due to affinity reasons.
+
+PGDEV=/proc/net/pktgen/kpktgend_1
+  echo "Removing all devices"
+ pgset "rem_device_all"
+
+# device config
+# delay 0 means maximum speed.
+
+CLONE_SKB="clone_skb 1000000"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+  echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 10.10.11.2"
+ pgset "dst_mac  00:04:23:08:91:dc"
+
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-2-2 b/samples/pktgen/pktgen.conf-2-2
new file mode 100755 (executable)
index 0000000..acea155
--- /dev/null
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+    local result
+
+    echo $1 > $PGDEV
+
+    result=`cat $PGDEV | fgrep "Result: OK:"`
+    if [ "$result" = "" ]; then
+         cat $PGDEV | fgrep Result:
+    fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. Two CPU example. We add eth1, eth2 respectively.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+  echo "Removing all devices"
+ pgset "rem_device_all"
+  echo "Adding eth1"
+ pgset "add_device eth1"
+
+PGDEV=/proc/net/pktgen/kpktgend_1
+  echo "Removing all devices"
+ pgset "rem_device_all"
+  echo "Adding eth2"
+ pgset "add_device eth2"
+
+
+# device config
+# delay 0 means maximum speed.
+
+CLONE_SKB="clone_skb 1000000"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+  echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 10.10.11.2"
+ pgset "dst_mac  00:04:23:08:91:dc"
+
+PGDEV=/proc/net/pktgen/eth2
+  echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 192.168.2.2"
+ pgset "dst_mac  00:04:23:08:91:de"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1 /proc/net/pktgen/eth2
index 107db88b1d5f9d1d5dda20c0636f229738fec8bd..dd56bffd6500e078b4aa7d4b64c8e04c91802541 100644 (file)
@@ -364,12 +364,12 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
        return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
 }
 
-static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int apparmor_inode_getattr(const struct path *path)
 {
-       if (!mediated_filesystem(dentry))
+       if (!mediated_filesystem(path->dentry))
                return 0;
 
-       return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry,
+       return common_perm_mnt_dentry(OP_GETATTR, path->mnt, path->dentry,
                                      AA_MAY_META_READ);
 }
 
index 070dd46f62f4f57c7262211352775e121439e8a2..0d03fcc489a49ee3221b1369ca2c1ff931c691cd 100644 (file)
@@ -225,7 +225,7 @@ static int cap_inode_setattr(struct dentry *dentry, struct iattr *iattr)
        return 0;
 }
 
-static int cap_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int cap_inode_getattr(const struct path *path)
 {
        return 0;
 }
@@ -776,11 +776,6 @@ static int cap_tun_dev_open(void *security)
 {
        return 0;
 }
-
-static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
-}
-
 #endif /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1134,7 +1129,6 @@ void __init security_fixup_ops(struct security_operations *ops)
        set_to_cap_if_null(ops, tun_dev_open);
        set_to_cap_if_null(ops, tun_dev_attach_queue);
        set_to_cap_if_null(ops, tun_dev_attach);
-       set_to_cap_if_null(ops, skb_owned_by);
 #endif /* CONFIG_SECURITY_NETWORK */
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
        set_to_cap_if_null(ops, xfrm_policy_alloc_security);
index 347896548ad3159a152186a4c1a27cdf92f1f4ad..25430a3aa7f7b9d6e6b4d10ae9bc72c8669c00fe 100644 (file)
@@ -31,30 +31,21 @@ static long compat_keyctl_instantiate_key_iov(
        key_serial_t ringid)
 {
        struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+       struct iov_iter from;
        long ret;
 
-       if (!_payload_iov || !ioc)
-               goto no_payload;
+       if (!_payload_iov)
+               ioc = 0;
 
-       ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
-                                          ARRAY_SIZE(iovstack),
-                                          iovstack, &iov);
+       ret = compat_import_iovec(WRITE, _payload_iov, ioc,
+                                 ARRAY_SIZE(iovstack), &iov,
+                                 &from);
        if (ret < 0)
-               goto err;
-       if (ret == 0)
-               goto no_payload_free;
-
-       ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-err:
-       if (iov != iovstack)
-               kfree(iov);
-       return ret;
+               return ret;
 
-no_payload_free:
-       if (iov != iovstack)
-               kfree(iov);
-no_payload:
-       return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+       ret = keyctl_instantiate_key_common(id, &from, ringid);
+       kfree(iov);
+       return ret;
 }
 
 /*
index 200e37867336a3c2903437e97f591fbc302e15b7..5105c2c2da75b0e13dec1196be67c88f4d789e72 100644 (file)
@@ -243,9 +243,10 @@ extern long keyctl_instantiate_key_iov(key_serial_t,
                                       unsigned, key_serial_t);
 extern long keyctl_invalidate_key(key_serial_t);
 
+struct iov_iter;
 extern long keyctl_instantiate_key_common(key_serial_t,
-                                         const struct iovec *,
-                                         unsigned, size_t, key_serial_t);
+                                         struct iov_iter *,
+                                         key_serial_t);
 #ifdef CONFIG_PERSISTENT_KEYRINGS
 extern long keyctl_get_persistent(uid_t, key_serial_t);
 extern unsigned persistent_keyring_expiry;
index 4743d71e4aa6dd12f2456a5f00496c1222775c6a..0b9ec78a7a7ad2b14af1ef0407e051e6dcef29ff 100644 (file)
@@ -997,21 +997,6 @@ static int keyctl_change_reqkey_auth(struct key *key)
        return commit_creds(new);
 }
 
-/*
- * Copy the iovec data from userspace
- */
-static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
-                                unsigned ioc)
-{
-       for (; ioc > 0; ioc--) {
-               if (copy_from_user(buffer, iov->iov_base, iov->iov_len) != 0)
-                       return -EFAULT;
-               buffer += iov->iov_len;
-               iov++;
-       }
-       return 0;
-}
-
 /*
  * Instantiate a key with the specified payload and link the key into the
  * destination keyring if one is given.
@@ -1022,20 +1007,21 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
  * If successful, 0 will be returned.
  */
 long keyctl_instantiate_key_common(key_serial_t id,
-                                  const struct iovec *payload_iov,
-                                  unsigned ioc,
-                                  size_t plen,
+                                  struct iov_iter *from,
                                   key_serial_t ringid)
 {
        const struct cred *cred = current_cred();
        struct request_key_auth *rka;
        struct key *instkey, *dest_keyring;
+       size_t plen = from ? iov_iter_count(from) : 0;
        void *payload;
        long ret;
-       bool vm = false;
 
        kenter("%d,,%zu,%d", id, plen, ringid);
 
+       if (!plen)
+               from = NULL;
+
        ret = -EINVAL;
        if (plen > 1024 * 1024 - 1)
                goto error;
@@ -1054,20 +1040,19 @@ long keyctl_instantiate_key_common(key_serial_t id,
        /* pull the payload in if one was supplied */
        payload = NULL;
 
-       if (payload_iov) {
+       if (from) {
                ret = -ENOMEM;
                payload = kmalloc(plen, GFP_KERNEL);
                if (!payload) {
                        if (plen <= PAGE_SIZE)
                                goto error;
-                       vm = true;
                        payload = vmalloc(plen);
                        if (!payload)
                                goto error;
                }
 
-               ret = copy_from_user_iovec(payload, payload_iov, ioc);
-               if (ret < 0)
+               ret = -EFAULT;
+               if (copy_from_iter(payload, plen, from) != plen)
                        goto error2;
        }
 
@@ -1089,10 +1074,7 @@ long keyctl_instantiate_key_common(key_serial_t id,
                keyctl_change_reqkey_auth(NULL);
 
 error2:
-       if (!vm)
-               kfree(payload);
-       else
-               vfree(payload);
+       kvfree(payload);
 error:
        return ret;
 }
@@ -1112,15 +1094,19 @@ long keyctl_instantiate_key(key_serial_t id,
                            key_serial_t ringid)
 {
        if (_payload && plen) {
-               struct iovec iov[1] = {
-                       [0].iov_base = (void __user *)_payload,
-                       [0].iov_len  = plen
-               };
+               struct iovec iov;
+               struct iov_iter from;
+               int ret;
 
-               return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
+               ret = import_single_range(WRITE, (void __user *)_payload, plen,
+                                         &iov, &from);
+               if (unlikely(ret))
+                       return ret;
+
+               return keyctl_instantiate_key_common(id, &from, ringid);
        }
 
-       return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+       return keyctl_instantiate_key_common(id, NULL, ringid);
 }
 
 /*
@@ -1138,29 +1124,19 @@ long keyctl_instantiate_key_iov(key_serial_t id,
                                key_serial_t ringid)
 {
        struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+       struct iov_iter from;
        long ret;
 
-       if (!_payload_iov || !ioc)
-               goto no_payload;
+       if (!_payload_iov)
+               ioc = 0;
 
-       ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
-                                   ARRAY_SIZE(iovstack), iovstack, &iov);
+       ret = import_iovec(WRITE, _payload_iov, ioc,
+                                   ARRAY_SIZE(iovstack), &iov, &from);
        if (ret < 0)
-               goto err;
-       if (ret == 0)
-               goto no_payload_free;
-
-       ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-err:
-       if (iov != iovstack)
-               kfree(iov);
+               return ret;
+       ret = keyctl_instantiate_key_common(id, &from, ringid);
+       kfree(iov);
        return ret;
-
-no_payload_free:
-       if (iov != iovstack)
-               kfree(iov);
-no_payload:
-       return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
 }
 
 /*
index e81d5bbe7363fc689199ea8db3a1a5fdc3e720e4..730ac65a573722238142ad0d978abc26f87287cd 100644 (file)
@@ -608,11 +608,11 @@ int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
 }
 EXPORT_SYMBOL_GPL(security_inode_setattr);
 
-int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+int security_inode_getattr(const struct path *path)
 {
-       if (unlikely(IS_PRIVATE(dentry->d_inode)))
+       if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
                return 0;
-       return security_ops->inode_getattr(mnt, dentry);
+       return security_ops->inode_getattr(path);
 }
 
 int security_inode_setxattr(struct dentry *dentry, const char *name,
@@ -1359,11 +1359,6 @@ int security_tun_dev_open(void *security)
 }
 EXPORT_SYMBOL(security_tun_dev_open);
 
-void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
-       security_ops->skb_owned_by(skb, sk);
-}
-
 #endif /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
index 4d1a54190388df96dddb7ff951c681dc28bab866..c318b304ee2f5be7001d6e26e30811bbbbd83859 100644 (file)
@@ -51,7 +51,6 @@
 #include <linux/tty.h>
 #include <net/icmp.h>
 #include <net/ip.h>            /* for local_port_range[] */
-#include <net/sock.h>
 #include <net/tcp.h>           /* struct or_callable used in sock_rcv_skb */
 #include <net/inet_connection_sock.h>
 #include <net/net_namespace.h>
@@ -1623,7 +1622,7 @@ static inline int dentry_has_perm(const struct cred *cred,
    the path to help the auditing code to more easily generate the
    pathname if needed. */
 static inline int path_has_perm(const struct cred *cred,
-                               struct path *path,
+                               const struct path *path,
                                u32 av)
 {
        struct inode *inode = path->dentry->d_inode;
@@ -2954,15 +2953,9 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
        return dentry_has_perm(cred, dentry, av);
 }
 
-static int selinux_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int selinux_inode_getattr(const struct path *path)
 {
-       const struct cred *cred = current_cred();
-       struct path path;
-
-       path.dentry = dentry;
-       path.mnt = mnt;
-
-       return path_has_perm(cred, &path, FILE__GETATTR);
+       return path_has_perm(current_cred(), path, FILE__GETATTR);
 }
 
 static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
@@ -4652,11 +4645,6 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
        selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
 }
 
-static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
-       skb_set_owner_w(skb, sk);
-}
-
 static int selinux_secmark_relabel_packet(u32 sid)
 {
        const struct task_security_struct *__tsec;
@@ -4858,21 +4846,17 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb,
 
 static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
-                                        int (*okfn)(struct sk_buff *))
+                                        const struct nf_hook_state *state)
 {
-       return selinux_ip_forward(skb, in, PF_INET);
+       return selinux_ip_forward(skb, state->in, PF_INET);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
-                                        const struct net_device *in,
-                                        const struct net_device *out,
-                                        int (*okfn)(struct sk_buff *))
+                                        const struct nf_hook_state *state)
 {
-       return selinux_ip_forward(skb, in, PF_INET6);
+       return selinux_ip_forward(skb, state->in, PF_INET6);
 }
 #endif /* IPV6 */
 
@@ -4920,9 +4904,7 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
 
 static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
        return selinux_ip_output(skb, PF_INET);
 }
@@ -5097,21 +5079,17 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb,
 
 static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
-                                          const struct net_device *in,
-                                          const struct net_device *out,
-                                          int (*okfn)(struct sk_buff *))
+                                          const struct nf_hook_state *state)
 {
-       return selinux_ip_postroute(skb, out, PF_INET);
+       return selinux_ip_postroute(skb, state->out, PF_INET);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
-                                          const struct net_device *in,
-                                          const struct net_device *out,
-                                          int (*okfn)(struct sk_buff *))
+                                          const struct nf_hook_state *state)
 {
-       return selinux_ip_postroute(skb, out, PF_INET6);
+       return selinux_ip_postroute(skb, state->out, PF_INET6);
 }
 #endif /* IPV6 */
 
@@ -6041,7 +6019,6 @@ static struct security_operations selinux_ops = {
        .tun_dev_attach_queue =         selinux_tun_dev_attach_queue,
        .tun_dev_attach =               selinux_tun_dev_attach,
        .tun_dev_open =                 selinux_tun_dev_open,
-       .skb_owned_by =                 selinux_skb_owned_by,
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
        .xfrm_policy_alloc_security =   selinux_xfrm_policy_alloc,
index c934311812f1a777093c44a89543dcae924b8568..1511965549b8232fdd4d3469166023c2d140f908 100644 (file)
@@ -1034,19 +1034,16 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
  *
  * Returns 0 if access is permitted, an error code otherwise
  */
-static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int smack_inode_getattr(const struct path *path)
 {
        struct smk_audit_info ad;
-       struct path path;
+       struct inode *inode = path->dentry->d_inode;
        int rc;
 
-       path.dentry = dentry;
-       path.mnt = mnt;
-
        smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
-       smk_ad_setfield_u_fs_path(&ad, path);
-       rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
-       rc = smk_bu_inode(dentry->d_inode, MAY_READ, rc);
+       smk_ad_setfield_u_fs_path(&ad, *path);
+       rc = smk_curacc(smk_of_inode(inode), MAY_READ, &ad);
+       rc = smk_bu_inode(inode, MAY_READ, rc);
        return rc;
 }
 
index c952632afb0d4ac8e44e35209e56a9b30d083c51..a455cfc9ec1f614851aba10693800a6acd476049 100644 (file)
@@ -23,9 +23,7 @@
 
 static unsigned int smack_ipv6_output(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
        struct socket_smack *ssp;
        struct smack_known *skp;
@@ -42,9 +40,7 @@ static unsigned int smack_ipv6_output(const struct nf_hook_ops *ops,
 
 static unsigned int smack_ipv4_output(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
+                                       const struct nf_hook_state *state)
 {
        struct socket_smack *ssp;
        struct smack_known *skp;
index b897d4862016ce51ba737cee7f86f07d95a28c65..f9c9fb1d56b4bde70d43a21cc22a71e37245dfbc 100644 (file)
@@ -945,7 +945,7 @@ char *tomoyo_encode2(const char *str, int str_len);
 char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
                      va_list args);
 char *tomoyo_read_token(struct tomoyo_acl_param *param);
-char *tomoyo_realpath_from_path(struct path *path);
+char *tomoyo_realpath_from_path(const struct path *path);
 char *tomoyo_realpath_nofollow(const char *pathname);
 const char *tomoyo_get_exe(void);
 const char *tomoyo_yesno(const unsigned int value);
@@ -978,7 +978,7 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1,
                      struct path *path2);
 int tomoyo_path_number_perm(const u8 operation, struct path *path,
                            unsigned long number);
-int tomoyo_path_perm(const u8 operation, struct path *path,
+int tomoyo_path_perm(const u8 operation, const struct path *path,
                     const char *target);
 unsigned int tomoyo_poll_control(struct file *file, poll_table *wait);
 unsigned int tomoyo_poll_log(struct file *file, poll_table *wait);
index c151a1869597f8155a0296f89fafa61cc65f447d..2367b100cc62daccafa80932e4740385612746e9 100644 (file)
@@ -145,7 +145,7 @@ static void tomoyo_add_slash(struct tomoyo_path_info *buf)
  *
  * Returns true on success, false otherwise.
  */
-static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, struct path *path)
+static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, const struct path *path)
 {
        buf->name = tomoyo_realpath_from_path(path);
        if (buf->name) {
@@ -782,7 +782,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
  *
  * Returns 0 on success, negative value otherwise.
  */
-int tomoyo_path_perm(const u8 operation, struct path *path, const char *target)
+int tomoyo_path_perm(const u8 operation, const struct path *path, const char *target)
 {
        struct tomoyo_request_info r;
        struct tomoyo_obj_info obj = {
index bed745c8b1a30d47a173fd7d96322aebb2d09c9c..1e0d480ff6a6b653cce9c4af266f5a8420a58c52 100644 (file)
@@ -89,7 +89,7 @@ char *tomoyo_encode(const char *str)
  *
  * If dentry is a directory, trailing '/' is appended.
  */
-static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
+static char *tomoyo_get_absolute_path(const struct path *path, char * const buffer,
                                      const int buflen)
 {
        char *pos = ERR_PTR(-ENOMEM);
@@ -216,7 +216,7 @@ out:
  *
  * Returns the buffer.
  */
-static char *tomoyo_get_socket_name(struct path *path, char * const buffer,
+static char *tomoyo_get_socket_name(const struct path *path, char * const buffer,
                                    const int buflen)
 {
        struct inode *inode = path->dentry->d_inode;
@@ -247,7 +247,7 @@ static char *tomoyo_get_socket_name(struct path *path, char * const buffer,
  * These functions use kzalloc(), so the caller must call kfree()
  * if these functions didn't return NULL.
  */
-char *tomoyo_realpath_from_path(struct path *path)
+char *tomoyo_realpath_from_path(const struct path *path)
 {
        char *buf = NULL;
        char *name = NULL;
index f0b756e27fed6b143f823d6a7708a408ea630bd8..57c88d52ffa52c3a7e799cba86a07027a5926aae 100644 (file)
@@ -144,10 +144,9 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int tomoyo_inode_getattr(const struct path *path)
 {
-       struct path path = { mnt, dentry };
-       return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, &path, NULL);
+       return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, path, NULL);
 }
 
 /**
index 279e24f613051fddb8ca16375ab9031e6a703b03..8e43610ec9b56a866057c203d563895e582558f9 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/pm_qos.h>
-#include <linux/aio.h>
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <sound/core.h>
@@ -35,6 +34,7 @@
 #include <sound/pcm_params.h>
 #include <sound/timer.h>
 #include <sound/minors.h>
+#include <linux/uio.h>
 
 /*
  *  Compatibility
@@ -3033,9 +3033,7 @@ static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
        return result;
 }
 
-static ssize_t snd_pcm_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                            unsigned long nr_segs, loff_t pos)
-
+static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
 {
        struct snd_pcm_file *pcm_file;
        struct snd_pcm_substream *substream;
@@ -3052,16 +3050,18 @@ static ssize_t snd_pcm_aio_read(struct kiocb *iocb, const struct iovec *iov,
        runtime = substream->runtime;
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
                return -EBADFD;
-       if (nr_segs > 1024 || nr_segs != runtime->channels)
+       if (!iter_is_iovec(to))
+               return -EINVAL;
+       if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
                return -EINVAL;
-       if (!frame_aligned(runtime, iov->iov_len))
+       if (!frame_aligned(runtime, to->iov->iov_len))
                return -EINVAL;
-       frames = bytes_to_samples(runtime, iov->iov_len);
-       bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL);
+       frames = bytes_to_samples(runtime, to->iov->iov_len);
+       bufs = kmalloc(sizeof(void *) * to->nr_segs, GFP_KERNEL);
        if (bufs == NULL)
                return -ENOMEM;
-       for (i = 0; i < nr_segs; ++i)
-               bufs[i] = iov[i].iov_base;
+       for (i = 0; i < to->nr_segs; ++i)
+               bufs[i] = to->iov[i].iov_base;
        result = snd_pcm_lib_readv(substream, bufs, frames);
        if (result > 0)
                result = frames_to_bytes(runtime, result);
@@ -3069,8 +3069,7 @@ static ssize_t snd_pcm_aio_read(struct kiocb *iocb, const struct iovec *iov,
        return result;
 }
 
-static ssize_t snd_pcm_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t pos)
+static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
 {
        struct snd_pcm_file *pcm_file;
        struct snd_pcm_substream *substream;
@@ -3087,15 +3086,17 @@ static ssize_t snd_pcm_aio_write(struct kiocb *iocb, const struct iovec *iov,
        runtime = substream->runtime;
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
                return -EBADFD;
-       if (nr_segs > 128 || nr_segs != runtime->channels ||
-           !frame_aligned(runtime, iov->iov_len))
+       if (!iter_is_iovec(from))
+               return -EINVAL;
+       if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
+           !frame_aligned(runtime, from->iov->iov_len))
                return -EINVAL;
-       frames = bytes_to_samples(runtime, iov->iov_len);
-       bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL);
+       frames = bytes_to_samples(runtime, from->iov->iov_len);
+       bufs = kmalloc(sizeof(void *) * from->nr_segs, GFP_KERNEL);
        if (bufs == NULL)
                return -ENOMEM;
-       for (i = 0; i < nr_segs; ++i)
-               bufs[i] = iov[i].iov_base;
+       for (i = 0; i < from->nr_segs; ++i)
+               bufs[i] = from->iov[i].iov_base;
        result = snd_pcm_lib_writev(substream, bufs, frames);
        if (result > 0)
                result = frames_to_bytes(runtime, result);
@@ -3633,7 +3634,7 @@ const struct file_operations snd_pcm_f_ops[2] = {
        {
                .owner =                THIS_MODULE,
                .write =                snd_pcm_write,
-               .aio_write =            snd_pcm_aio_write,
+               .write_iter =           snd_pcm_writev,
                .open =                 snd_pcm_playback_open,
                .release =              snd_pcm_release,
                .llseek =               no_llseek,
@@ -3647,7 +3648,7 @@ const struct file_operations snd_pcm_f_ops[2] = {
        {
                .owner =                THIS_MODULE,
                .read =                 snd_pcm_read,
-               .aio_read =             snd_pcm_aio_read,
+               .read_iter =            snd_pcm_readv,
                .open =                 snd_pcm_capture_open,
                .release =              snd_pcm_release,
                .llseek =               no_llseek,
index 833a96611da6d042e089294990801817d0f1ade9..7cc72a336645124c095c9595782198a4006313a7 100644 (file)
@@ -90,8 +90,10 @@ extern void yyerror(const char *str);
 "#"?("hatype") { return K_HATYPE; }
 "#"?("rxhash") { return K_RXHASH; }
 "#"?("cpu")    { return K_CPU; }
-"#"?("vlan_tci") { return K_VLANT; }
-"#"?("vlan_pr")        { return K_VLANP; }
+"#"?("vlan_tci")       { return K_VLAN_TCI; }
+"#"?("vlan_pr")                { return K_VLAN_AVAIL; }
+"#"?("vlan_avail")     { return K_VLAN_AVAIL; }
+"#"?("vlan_tpid")      { return K_VLAN_TPID; }
 "#"?("rand")   { return K_RAND; }
 
 ":"            { return ':'; }
index e6306c51c26f9e7cb53342ad088a9db1e4e32423..e24eea1b0db539d8592247699b4f6697221cbb9d 100644 (file)
@@ -56,7 +56,7 @@ static void bpf_set_jmp_label(char *label, enum jmp_type type);
 %token OP_LDXI
 
 %token K_PKT_LEN K_PROTO K_TYPE K_NLATTR K_NLATTR_NEST K_MARK K_QUEUE K_HATYPE
-%token K_RXHASH K_CPU K_IFIDX K_VLANT K_VLANP K_POFF K_RAND
+%token K_RXHASH K_CPU K_IFIDX K_VLAN_TCI K_VLAN_AVAIL K_VLAN_TPID K_POFF K_RAND
 
 %token ':' ',' '[' ']' '(' ')' 'x' 'a' '+' 'M' '*' '&' '#' '%'
 
@@ -155,10 +155,10 @@ ldb
        | OP_LDB K_CPU {
                bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_CPU); }
-       | OP_LDB K_VLANT {
+       | OP_LDB K_VLAN_TCI {
                bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_VLAN_TAG); }
-       | OP_LDB K_VLANP {
+       | OP_LDB K_VLAN_AVAIL {
                bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); }
        | OP_LDB K_POFF {
@@ -167,6 +167,9 @@ ldb
        | OP_LDB K_RAND {
                bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_RANDOM); }
+       | OP_LDB K_VLAN_TPID {
+               bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+                                  SKF_AD_OFF + SKF_AD_VLAN_TPID); }
        ;
 
 ldh
@@ -206,10 +209,10 @@ ldh
        | OP_LDH K_CPU {
                bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_CPU); }
-       | OP_LDH K_VLANT {
+       | OP_LDH K_VLAN_TCI {
                bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_VLAN_TAG); }
-       | OP_LDH K_VLANP {
+       | OP_LDH K_VLAN_AVAIL {
                bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); }
        | OP_LDH K_POFF {
@@ -218,6 +221,9 @@ ldh
        | OP_LDH K_RAND {
                bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_RANDOM); }
+       | OP_LDH K_VLAN_TPID {
+               bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+                                  SKF_AD_OFF + SKF_AD_VLAN_TPID); }
        ;
 
 ldi
@@ -262,10 +268,10 @@ ld
        | OP_LD K_CPU {
                bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_CPU); }
-       | OP_LD K_VLANT {
+       | OP_LD K_VLAN_TCI {
                bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_VLAN_TAG); }
-       | OP_LD K_VLANP {
+       | OP_LD K_VLAN_AVAIL {
                bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); }
        | OP_LD K_POFF {
@@ -274,6 +280,9 @@ ld
        | OP_LD K_RAND {
                bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_RANDOM); }
+       | OP_LD K_VLAN_TPID {
+               bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+                                  SKF_AD_OFF + SKF_AD_VLAN_TPID); }
        | OP_LD 'M' '[' number ']' {
                bpf_set_curr_instr(BPF_LD | BPF_MEM, 0, 0, $4); }
        | OP_LD '[' 'x' '+' number ']' {